scsi: lpfc: Synchronize hardware queues with SCSI MQ interface
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c1c36812c3d29938805569ae9aacfeb8b202dccd..88b1c3ca26dc22df4c450d5d66f71f4e1cb1163e 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1039,12 +1039,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 {
        struct lpfc_scsi_buf *psb, *psb_next;
        struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
+       struct lpfc_sli4_hdw_queue *qp;
        LIST_HEAD(aborts);
        LIST_HEAD(nvme_aborts);
        LIST_HEAD(nvmet_aborts);
-       unsigned long iflag = 0;
        struct lpfc_sglq *sglq_entry = NULL;
-       int cnt;
+       int cnt, idx;
 
 
        lpfc_sli_hbqbuf_free_all(phba);
@@ -1071,55 +1071,65 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 
 
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
-       /* abts_scsi_buf_list_lock required because worker thread uses this
+
+       /* abts_xxxx_buf_list_lock required because worker thread uses this
         * list.
         */
-       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
-               spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
-               list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
-                                &aborts);
-               spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
-       }
-
-       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-               spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-               list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
-                                &nvme_aborts);
-               list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
-                                &nvmet_aborts);
-               spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-       }
-
-       spin_unlock_irq(&phba->hbalock);
+       cnt = 0;
+       for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+               qp = &phba->sli4_hba.hdwq[idx];
 
-       list_for_each_entry_safe(psb, psb_next, &aborts, list) {
-               psb->pCmd = NULL;
-               psb->status = IOSTAT_SUCCESS;
-       }
-       spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
-       list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
-       spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
+               spin_lock(&qp->abts_scsi_buf_list_lock);
+               list_splice_init(&qp->lpfc_abts_scsi_buf_list,
+                                &aborts);
 
-       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-               cnt = 0;
-               list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
+               list_for_each_entry_safe(psb, psb_next, &aborts, list) {
                        psb->pCmd = NULL;
                        psb->status = IOSTAT_SUCCESS;
                        cnt++;
                }
-               spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
-               phba->put_nvme_bufs += cnt;
-               list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
-               spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
+               spin_lock(&qp->io_buf_list_put_lock);
+               list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
+               qp->put_io_bufs += qp->abts_scsi_io_bufs;
+               qp->abts_scsi_io_bufs = 0;
+               spin_unlock(&qp->io_buf_list_put_lock);
+               spin_unlock(&qp->abts_scsi_buf_list_lock);
+
+               if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+                       spin_lock(&qp->abts_nvme_buf_list_lock);
+                       list_splice_init(&qp->lpfc_abts_nvme_buf_list,
+                                        &nvme_aborts);
+                       list_for_each_entry_safe(psb, psb_next, &nvme_aborts,
+                                                list) {
+                               psb->pCmd = NULL;
+                               psb->status = IOSTAT_SUCCESS;
+                               cnt++;
+                       }
+                       spin_lock(&qp->io_buf_list_put_lock);
+                       qp->put_io_bufs += qp->abts_nvme_io_bufs;
+                       qp->abts_nvme_io_bufs = 0;
+                       list_splice_init(&nvme_aborts,
+                                        &qp->lpfc_io_buf_list_put);
+                       spin_unlock(&qp->io_buf_list_put_lock);
+                       spin_unlock(&qp->abts_nvme_buf_list_lock);
+
+               }
+       }
 
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+               list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+                                &nvmet_aborts);
+               spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
                        ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
                        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
                }
        }
 
+       spin_unlock_irq(&phba->hbalock);
        lpfc_sli4_free_sp_events(phba);
-       return 0;
+       return cnt;
 }
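The rewritten lpfc_hba_down_post_s4() above repeats one core move per hardware queue: take the queue-local lock, detach the whole abort list with list_splice_init(), and only then walk the detached entries. A minimal userspace sketch of that splice-under-lock pattern, with illustrative names and a pthread mutex standing in for the spinlock (not driver code):

    #include <pthread.h>
    #include <stdio.h>

    struct node { struct node *next; int xri; };

    struct queue {
        pthread_mutex_t lock;
        struct node *head;          /* list protected by ->lock */
    };

    /* Detach the whole list while holding the lock (the list_splice_init
     * idea), then let the caller walk the private copy lock-free. */
    static struct node *splice_init(struct queue *q)
    {
        pthread_mutex_lock(&q->lock);
        struct node *all = q->head;
        q->head = NULL;
        pthread_mutex_unlock(&q->lock);
        return all;
    }

    int main(void)
    {
        struct node n2 = { NULL, 2 }, n1 = { &n2, 1 };
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, &n1 };

        for (struct node *n = splice_init(&q); n; n = n->next)
            printf("flushing xri %d\n", n->xri);  /* lock no longer held */
        return 0;
    }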
 
 /**
@@ -1272,7 +1282,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
        struct lpfc_register reg_data;
        struct nvme_fc_local_port *localport;
        struct lpfc_nvme_lport *lport;
-       struct lpfc_nvme_ctrl_stat *cstat;
+       struct lpfc_fc4_ctrl_stat *cstat;
        void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
 
        vports = lpfc_create_vport_work_array(phba);
@@ -1313,31 +1323,28 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                        localport->private;
                                tot = 0;
                                for (i = 0;
-                                       i < phba->cfg_nvme_io_channel; i++) {
-                                       cstat = &lport->cstat[i];
-                                       data1 = atomic_read(
-                                               &cstat->fc4NvmeInputRequests);
-                                       data2 = atomic_read(
-                                               &cstat->fc4NvmeOutputRequests);
-                                       data3 = atomic_read(
-                                               &cstat->fc4NvmeControlRequests);
+                                       i < phba->cfg_hdw_queue; i++) {
+                                       cstat =
+                                            &phba->sli4_hba.hdwq[i].nvme_cstat;
+                                       data1 = cstat->input_requests;
+                                       data2 = cstat->output_requests;
+                                       data3 = cstat->control_requests;
                                        tot += (data1 + data2 + data3);
-                                       tot -= atomic_read(
-                                               &cstat->fc4NvmeIoCmpls);
+                                       tot -= cstat->io_cmpls;
                                }
                        }
                }
 
                /* Interrupts per sec per EQ */
-               val = phba->cfg_fcp_imax / phba->io_channel_irqs;
+               val = phba->cfg_fcp_imax / phba->cfg_hdw_queue;
                tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
 
                /* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
                max_cqe = time_elapsed * tick_cqe;
 
-               for (i = 0; i < phba->io_channel_irqs; i++) {
+               for (i = 0; i < phba->cfg_hdw_queue; i++) {
                        /* Fast-path EQ */
-                       qp = phba->sli4_hba.hba_eq[i];
+                       qp = phba->sli4_hba.hdwq[i].hba_eq;
                        if (!qp)
                                continue;
 
@@ -1359,7 +1366,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                if (val) {
                                        /* First, interrupts per sec per EQ */
                                        val = phba->cfg_fcp_imax /
-                                               phba->io_channel_irqs;
+                                               phba->cfg_hdw_queue;
 
                                        /* us delay between each interrupt */
                                        val = LPFC_SEC_TO_USEC / val;
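For reference, the delay heuristic above is plain integer arithmetic. A worked example with assumed values (cfg_fcp_imax, cfg_hdw_queue, CONFIG_HZ and the elapsed-tick count here are illustrative, not taken from the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned int cfg_fcp_imax = 150000; /* interrupts/sec, assumed */
        unsigned int cfg_hdw_queue = 4;     /* EQs sharing that budget */
        unsigned int hz = 1000;             /* CONFIG_HZ, assumed */
        unsigned int time_elapsed = 5;      /* ticks since the last check */

        unsigned int val = cfg_fcp_imax / cfg_hdw_queue; /* per-EQ int/s */
        unsigned int tick_cqe = val / hz;                /* CQEs/tick/EQ */
        unsigned int max_cqe = time_elapsed * tick_cqe;  /* window budget */

        printf("per-EQ %u int/s -> %u CQE/tick -> budget %u\n",
               val, tick_cqe, max_cqe);
        return 0;
    }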
@@ -2943,7 +2950,8 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
 void
 lpfc_stop_hba_timers(struct lpfc_hba *phba)
 {
-       lpfc_stop_vport_timers(phba->pport);
+       if (phba->pport)
+               lpfc_stop_vport_timers(phba->pport);
        del_timer_sync(&phba->sli.mbox_tmo);
        del_timer_sync(&phba->fabric_block_timer);
        del_timer_sync(&phba->eratt_poll);
@@ -3355,50 +3363,57 @@ lpfc_scsi_free(struct lpfc_hba *phba)
        spin_unlock(&phba->scsi_buf_list_get_lock);
        spin_unlock_irq(&phba->hbalock);
 }
+
 /**
- * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
+ * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is to free all the NVME buffers and IOCBs from the driver
+ * This routine frees all the IO buffers and IOCBs from the driver
  * list back to the kernel. It is called from lpfc_pci_remove_one to free
  * the internal resources before the device is removed from the system.
  **/
 static void
-lpfc_nvme_free(struct lpfc_hba *phba)
+lpfc_io_free(struct lpfc_hba *phba)
 {
        struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
-
-       if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
-               return;
+       struct lpfc_sli4_hdw_queue *qp;
+       int idx;
 
        spin_lock_irq(&phba->hbalock);
 
-       /* Release all the lpfc_nvme_bufs maintained by this host. */
-       spin_lock(&phba->nvme_buf_list_put_lock);
-       list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
-                                &phba->lpfc_nvme_buf_list_put, list) {
-               list_del(&lpfc_ncmd->list);
-               phba->put_nvme_bufs--;
-               dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
-                             lpfc_ncmd->dma_handle);
-               kfree(lpfc_ncmd);
-               phba->total_nvme_bufs--;
+       for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+               qp = &phba->sli4_hba.hdwq[idx];
+               /* Release all the lpfc_nvme_bufs maintained by this host. */
+               spin_lock(&qp->io_buf_list_put_lock);
+               list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+                                        &qp->lpfc_io_buf_list_put,
+                                        list) {
+                       list_del(&lpfc_ncmd->list);
+                       qp->put_io_bufs--;
+                       dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+                                     lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+                       kfree(lpfc_ncmd);
+                       qp->total_io_bufs--;
+               }
+               spin_unlock(&qp->io_buf_list_put_lock);
+
+               spin_lock(&qp->io_buf_list_get_lock);
+               list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+                                        &qp->lpfc_io_buf_list_get,
+                                        list) {
+                       list_del(&lpfc_ncmd->list);
+                       qp->get_io_bufs--;
+                       dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+                                     lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+                       kfree(lpfc_ncmd);
+                       qp->total_io_bufs--;
+               }
+               spin_unlock(&qp->io_buf_list_get_lock);
        }
-       spin_unlock(&phba->nvme_buf_list_put_lock);
 
-       spin_lock(&phba->nvme_buf_list_get_lock);
-       list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
-                                &phba->lpfc_nvme_buf_list_get, list) {
-               list_del(&lpfc_ncmd->list);
-               phba->get_nvme_bufs--;
-               dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
-                             lpfc_ncmd->dma_handle);
-               kfree(lpfc_ncmd);
-               phba->total_nvme_bufs--;
-       }
-       spin_unlock(&phba->nvme_buf_list_get_lock);
        spin_unlock_irq(&phba->hbalock);
 }
+
 /**
  * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
  * @phba: pointer to lpfc hba data structure.
@@ -3640,8 +3655,102 @@ out_free_mem:
        return rc;
 }
 
+int
+lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
+{
+       LIST_HEAD(blist);
+       struct lpfc_sli4_hdw_queue *qp;
+       struct lpfc_scsi_buf *lpfc_cmd;
+       struct lpfc_scsi_buf *iobufp, *prev_iobufp;
+       int idx, cnt, xri, inserted;
+
+       cnt = 0;
+       for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+               qp = &phba->sli4_hba.hdwq[idx];
+               spin_lock_irq(&qp->io_buf_list_get_lock);
+               spin_lock(&qp->io_buf_list_put_lock);
+
+               /* Take everything off the get and put lists */
+               list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
+               list_splice(&qp->lpfc_io_buf_list_put, &blist);
+               INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
+               INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
+               cnt += qp->get_io_bufs + qp->put_io_bufs;
+               qp->get_io_bufs = 0;
+               qp->put_io_bufs = 0;
+               qp->total_io_bufs = 0;
+               spin_unlock(&qp->io_buf_list_put_lock);
+               spin_unlock_irq(&qp->io_buf_list_get_lock);
+       }
+
+       /*
+        * Take IO buffers off blist and put on cbuf sorted by XRI.
+        * This is because POST_SGL takes a sequential range of XRIs
+        * to post to the firmware.
+        */
+       for (idx = 0; idx < cnt; idx++) {
+               list_remove_head(&blist, lpfc_cmd, struct lpfc_scsi_buf, list);
+               if (!lpfc_cmd)
+                       return cnt;
+               if (idx == 0) {
+                       list_add_tail(&lpfc_cmd->list, cbuf);
+                       continue;
+               }
+               xri = lpfc_cmd->cur_iocbq.sli4_xritag;
+               inserted = 0;
+               prev_iobufp = NULL;
+               list_for_each_entry(iobufp, cbuf, list) {
+                       if (xri < iobufp->cur_iocbq.sli4_xritag) {
+                               if (prev_iobufp)
+                                       list_add(&lpfc_cmd->list,
+                                                &prev_iobufp->list);
+                               else
+                                       list_add(&lpfc_cmd->list, cbuf);
+                               inserted = 1;
+                               break;
+                       }
+                       prev_iobufp = iobufp;
+               }
+               if (!inserted)
+                       list_add_tail(&lpfc_cmd->list, cbuf);
+       }
+       return cnt;
+}
+
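lpfc_io_buf_flush() above keeps cbuf ordered by XRI because POST_SGL posts a sequential XRI range to the firmware. The insert walks the list with a trailing pointer and links the new entry before the first larger key. The same pattern on a plain singly linked list, as a standalone sketch (illustrative types, not driver code):

    #include <stdio.h>
    #include <stdlib.h>

    struct buf { int xri; struct buf *next; };

    /* Insert b before the first entry with a larger xri; append if none. */
    static void sorted_insert(struct buf **head, struct buf *b)
    {
        struct buf *prev = NULL, *cur;

        for (cur = *head; cur; prev = cur, cur = cur->next)
            if (b->xri < cur->xri)
                break;
        b->next = cur;
        if (prev)
            prev->next = b;
        else
            *head = b;
    }

    int main(void)
    {
        int xris[] = { 7, 3, 9, 1 };
        struct buf *head = NULL;

        for (size_t i = 0; i < 4; i++) {
            struct buf *b = malloc(sizeof(*b));
            b->xri = xris[i];
            sorted_insert(&head, b);
        }
        for (struct buf *b = head; b; b = b->next)
            printf("%d ", b->xri);      /* prints: 1 3 7 9 */
        printf("\n");
        return 0;
    }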
+int
+lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
+{
+       struct lpfc_sli4_hdw_queue *qp;
+       struct lpfc_scsi_buf *lpfc_cmd;
+       int idx, cnt;
+
+       qp = phba->sli4_hba.hdwq;
+       cnt = 0;
+       while (!list_empty(cbuf)) {
+               for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+                       list_remove_head(cbuf, lpfc_cmd,
+                                        struct lpfc_scsi_buf, list);
+                       if (!lpfc_cmd)
+                               return cnt;
+                       cnt++;
+                       qp = &phba->sli4_hba.hdwq[idx];
+                       lpfc_cmd->hdwq_no = idx;
+                       lpfc_cmd->hdwq = qp;
+                       lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
+                       lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
+                       spin_lock(&qp->io_buf_list_put_lock);
+                       list_add_tail(&lpfc_cmd->list,
+                                     &qp->lpfc_io_buf_list_put);
+                       qp->put_io_bufs++;
+                       qp->total_io_bufs++;
+                       spin_unlock(&qp->io_buf_list_put_lock);
+               }
+       }
+       return cnt;
+}
+
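lpfc_io_buf_replenish() then deals the sorted buffers back out one per queue per pass, so XRIs end up striped evenly across the hardware queues. The distribution itself reduces to a modulo walk, sketched here with counters standing in for the per-queue put lists (names assumed for illustration):

    #include <stdio.h>

    int main(void)
    {
        enum { NQUEUES = 4 };           /* cfg_hdw_queue, assumed */
        int nbufs = 10;                 /* entries on the cbuf list */
        int per_queue[NQUEUES] = { 0 };

        /* One buffer per queue per pass, like the idx loop above. */
        for (int i = 0; i < nbufs; i++)
            per_queue[i % NQUEUES]++;

        for (int q = 0; q < NQUEUES; q++)
            printf("hdwq %d receives %d buffers\n", q, per_queue[q]);
        return 0;
    }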
 /**
- * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
+ * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine first calculates the sizes of the current els and allocated
@@ -3653,94 +3762,192 @@ out_free_mem:
  *   0 - successful (for now, it always returns 0)
  **/
 int
-lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
+lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
 {
-       struct lpfc_scsi_buf *psb, *psb_next;
-       uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
-       LIST_HEAD(scsi_sgl_list);
-       int rc;
-
-       /*
-        * update on pci function's els xri-sgl list
-        */
-       els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-       phba->total_scsi_bufs = 0;
+       struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
+       uint16_t i, lxri, els_xri_cnt;
+       uint16_t io_xri_cnt, io_xri_max;
+       LIST_HEAD(io_sgl_list);
+       int rc, cnt;
 
        /*
-        * update on pci function's allocated scsi xri-sgl list
+        * update on pci function's allocated nvme xri-sgl list
         */
-       /* maximum number of xris available for scsi buffers */
-       phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
-                                     els_xri_cnt;
 
-       if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
-               return 0;
+       /* maximum number of xris available for nvme buffers */
+       els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+       io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+       phba->sli4_hba.io_xri_max = io_xri_max;
 
-       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
-               phba->sli4_hba.scsi_xri_max =  /* Split them up */
-                       (phba->sli4_hba.scsi_xri_max *
-                        phba->cfg_xri_split) / 100;
+       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                       "6074 Current allocated XRI sgl count:%d, "
+                       "maximum XRI count:%d\n",
+                       phba->sli4_hba.io_xri_cnt,
+                       phba->sli4_hba.io_xri_max);
 
-       spin_lock_irq(&phba->scsi_buf_list_get_lock);
-       spin_lock(&phba->scsi_buf_list_put_lock);
-       list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
-       list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
-       spin_unlock(&phba->scsi_buf_list_put_lock);
-       spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+       cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
 
-       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-                       "6060 Current allocated SCSI xri-sgl count:%d, "
-                       "maximum  SCSI xri count:%d (split:%d)\n",
-                       phba->sli4_hba.scsi_xri_cnt,
-                       phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);
-
-       if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
-               /* max scsi xri shrinked below the allocated scsi buffers */
-               scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
-                                       phba->sli4_hba.scsi_xri_max;
-               /* release the extra allocated scsi buffers */
-               for (i = 0; i < scsi_xri_cnt; i++) {
-                       list_remove_head(&scsi_sgl_list, psb,
-                                        struct lpfc_scsi_buf, list);
-                       if (psb) {
+       if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
+               /* max nvme xri shrunk below the allocated nvme buffers */
+               io_xri_cnt = phba->sli4_hba.io_xri_cnt -
+                                       phba->sli4_hba.io_xri_max;
+               /* release the extra allocated nvme buffers */
+               for (i = 0; i < io_xri_cnt; i++) {
+                       list_remove_head(&io_sgl_list, lpfc_ncmd,
+                                        struct lpfc_nvme_buf, list);
+                       if (lpfc_ncmd) {
                                dma_pool_free(phba->lpfc_sg_dma_buf_pool,
-                                             psb->data, psb->dma_handle);
-                               kfree(psb);
+                                             lpfc_ncmd->data,
+                                             lpfc_ncmd->dma_handle);
+                               kfree(lpfc_ncmd);
                        }
                }
-               spin_lock_irq(&phba->scsi_buf_list_get_lock);
-               phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
-               spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+               phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
        }
 
-       /* update xris associated to remaining allocated scsi buffers */
-       psb = NULL;
-       psb_next = NULL;
-       list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
+       /* update xris associated to remaining allocated nvme buffers */
+       lpfc_ncmd = NULL;
+       lpfc_ncmd_next = NULL;
+       phba->sli4_hba.io_xri_cnt = cnt;
+       list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+                                &io_sgl_list, list) {
                lxri = lpfc_sli4_next_xritag(phba);
                if (lxri == NO_XRI) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                                       "2560 Failed to allocate xri for "
-                                       "scsi buffer\n");
+                                       "6075 Failed to allocate xri for "
+                                       "nvme buffer\n");
                        rc = -ENOMEM;
                        goto out_free_mem;
                }
-               psb->cur_iocbq.sli4_lxritag = lxri;
-               psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+               lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
+               lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
        }
-       spin_lock_irq(&phba->scsi_buf_list_get_lock);
-       spin_lock(&phba->scsi_buf_list_put_lock);
-       list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
-       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-       spin_unlock(&phba->scsi_buf_list_put_lock);
-       spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+       cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
        return 0;
 
 out_free_mem:
-       lpfc_scsi_free(phba);
+       lpfc_io_free(phba);
        return rc;
 }
 
+/**
+ * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
+ * @phba: pointer to lpfc hba data structure.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine allocates IO buffers for a device with the SLI-4 interface
+ * spec. An IO buffer contains all the information needed to initiate an
+ * I/O. After allocating up to @num_to_alloc IO buffers and putting them
+ * on a list, it posts them to the port using an SGL block post.
+ *
+ * Return codes:
+ *   int - number of IO buffers that were allocated and posted.
+ *   0 = failure; fewer than num_to_alloc indicates a partial failure.
+ **/
+int
+lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
+{
+       struct lpfc_nvme_buf *lpfc_ncmd;
+       struct lpfc_iocbq *pwqeq;
+       uint16_t iotag, lxri = 0;
+       int bcnt, num_posted;
+       LIST_HEAD(prep_nblist);
+       LIST_HEAD(post_nblist);
+       LIST_HEAD(nvme_nblist);
+
+       /* Sanity check to ensure our sizing is right for both SCSI and NVME */
+       if ((sizeof(struct lpfc_scsi_buf) > LPFC_COMMON_IO_BUF_SZ) ||
+           (sizeof(struct lpfc_nvme_buf) > LPFC_COMMON_IO_BUF_SZ)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+                               "6426 Common buffer size mismatch: %ld %ld\n",
+                               sizeof(struct lpfc_scsi_buf),
+                               sizeof(struct lpfc_nvme_buf));
+               return 0;
+       }
+
+       phba->sli4_hba.io_xri_cnt = 0;
+       for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+               lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL);
+               if (!lpfc_ncmd)
+                       break;
+               /*
+                * Get memory from the pci pool to map the virt space to
+                * pci bus space for an I/O. The DMA buffer includes the
+                * number of SGE's necessary to support the sg_tablesize.
+                */
+               lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
+                               GFP_KERNEL,
+                               &lpfc_ncmd->dma_handle);
+               if (!lpfc_ncmd->data) {
+                       kfree(lpfc_ncmd);
+                       break;
+               }
+               memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
+
+               /*
+                * 4K Page alignment is CRITICAL to BlockGuard, double check
+                * to be sure.
+                */
+               if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+                   (((unsigned long)(lpfc_ncmd->data) &
+                   (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+                                       "3369 Memory alignment err: addr=%lx\n",
+                                       (unsigned long)lpfc_ncmd->data);
+                       dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+                                     lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+                       kfree(lpfc_ncmd);
+                       break;
+               }
+
+               lxri = lpfc_sli4_next_xritag(phba);
+               if (lxri == NO_XRI) {
+                       dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+                                     lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+                       kfree(lpfc_ncmd);
+                       break;
+               }
+               pwqeq = &lpfc_ncmd->cur_iocbq;
+
+               /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
+               iotag = lpfc_sli_next_iotag(phba, pwqeq);
+               if (iotag == 0) {
+                       dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+                                     lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+                       kfree(lpfc_ncmd);
+                       lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+                                       "6121 Failed to allocate IOTAG for"
+                                       " XRI:0x%x\n", lxri);
+                       lpfc_sli4_free_xri(phba, lxri);
+                       break;
+               }
+               pwqeq->sli4_lxritag = lxri;
+               pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+               pwqeq->context1 = lpfc_ncmd;
+
+               /* Initialize local short-hand pointers. */
+               lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
+               lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
+               lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
+
+               /* add the nvme buffer to a post list */
+               list_add_tail(&lpfc_ncmd->list, &post_nblist);
+               phba->sli4_hba.io_xri_cnt++;
+       }
+       lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+                       "6114 Allocate %d out of %d requested new NVME "
+                       "buffers\n", bcnt, num_to_alloc);
+
+       /* post the list of nvme buffer sgls to port if available */
+       if (!list_empty(&post_nblist))
+               num_posted = lpfc_sli4_post_io_sgl_list(
+                               phba, &post_nblist, bcnt);
+       else
+               num_posted = 0;
+
+       return num_posted;
+}
+
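The BlockGuard path in lpfc_new_io_buf() rejects any DMA buffer whose address is not 4K aligned. The test relies on SLI4_PAGE_SIZE being a power of two, so masking with (size - 1) isolates the low-order offset bits. A standalone illustration of the same check (not driver code):

    #include <stdalign.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SLI4_PAGE_SIZE 4096UL   /* power of two, as in the driver */

    static int is_sli4_page_aligned(const void *p)
    {
        /* For a power-of-two size, the low-order bits must all be zero. */
        return ((uintptr_t)p & (SLI4_PAGE_SIZE - 1)) == 0;
    }

    int main(void)
    {
        static alignas(4096) char blob[2 * SLI4_PAGE_SIZE];

        printf("%p aligned? %d\n", (void *)blob,
               is_sli4_page_aligned(blob));            /* prints 1 */
        printf("%p aligned? %d\n", (void *)(blob + 1),
               is_sli4_page_aligned(blob + 1));        /* prints 0 */
        return 0;
    }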
 static uint64_t
 lpfc_get_wwpn(struct lpfc_hba *phba)
 {
@@ -3776,111 +3983,6 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
                return rol64(wwn, 32);
 }
 
-/**
- * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine first calculates the sizes of the current els and allocated
- * scsi sgl lists, and then goes through all sgls to updates the physical
- * XRIs assigned due to port function reset. During port initialization, the
- * current els and allocated scsi sgl lists are 0s.
- *
- * Return codes
- *   0 - successful (for now, it always returns 0)
- **/
-int
-lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
-{
-       struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
-       uint16_t i, lxri, els_xri_cnt;
-       uint16_t nvme_xri_cnt, nvme_xri_max;
-       LIST_HEAD(nvme_sgl_list);
-       int rc, cnt;
-
-       phba->total_nvme_bufs = 0;
-       phba->get_nvme_bufs = 0;
-       phba->put_nvme_bufs = 0;
-
-       if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
-               return 0;
-       /*
-        * update on pci function's allocated nvme xri-sgl list
-        */
-
-       /* maximum number of xris available for nvme buffers */
-       els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-       nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-       phba->sli4_hba.nvme_xri_max = nvme_xri_max;
-       phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;
-
-       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-                       "6074 Current allocated NVME xri-sgl count:%d, "
-                       "maximum  NVME xri count:%d\n",
-                       phba->sli4_hba.nvme_xri_cnt,
-                       phba->sli4_hba.nvme_xri_max);
-
-       spin_lock_irq(&phba->nvme_buf_list_get_lock);
-       spin_lock(&phba->nvme_buf_list_put_lock);
-       list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
-       list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
-       cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
-       phba->get_nvme_bufs = 0;
-       phba->put_nvme_bufs = 0;
-       spin_unlock(&phba->nvme_buf_list_put_lock);
-       spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-
-       if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
-               /* max nvme xri shrunk below the allocated nvme buffers */
-               spin_lock_irq(&phba->nvme_buf_list_get_lock);
-               nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
-                                       phba->sli4_hba.nvme_xri_max;
-               spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-               /* release the extra allocated nvme buffers */
-               for (i = 0; i < nvme_xri_cnt; i++) {
-                       list_remove_head(&nvme_sgl_list, lpfc_ncmd,
-                                        struct lpfc_nvme_buf, list);
-                       if (lpfc_ncmd) {
-                               dma_pool_free(phba->lpfc_sg_dma_buf_pool,
-                                             lpfc_ncmd->data,
-                                             lpfc_ncmd->dma_handle);
-                               kfree(lpfc_ncmd);
-                       }
-               }
-               spin_lock_irq(&phba->nvme_buf_list_get_lock);
-               phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
-               spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-       }
-
-       /* update xris associated to remaining allocated nvme buffers */
-       lpfc_ncmd = NULL;
-       lpfc_ncmd_next = NULL;
-       list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
-                                &nvme_sgl_list, list) {
-               lxri = lpfc_sli4_next_xritag(phba);
-               if (lxri == NO_XRI) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                                       "6075 Failed to allocate xri for "
-                                       "nvme buffer\n");
-                       rc = -ENOMEM;
-                       goto out_free_mem;
-               }
-               lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
-               lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
-       }
-       spin_lock_irq(&phba->nvme_buf_list_get_lock);
-       spin_lock(&phba->nvme_buf_list_put_lock);
-       list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
-       phba->get_nvme_bufs = cnt;
-       INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
-       spin_unlock(&phba->nvme_buf_list_put_lock);
-       spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-       return 0;
-
-out_free_mem:
-       lpfc_nvme_free(phba);
-       return rc;
-}
-
 /**
  * lpfc_create_port - Create an FC port
  * @phba: pointer to lpfc hba data structure.
@@ -3961,12 +4063,16 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
        shost->max_lun = vport->cfg_max_luns;
        shost->this_id = -1;
        shost->max_cmd_len = 16;
-       shost->nr_hw_queues = phba->cfg_fcp_io_channel;
        if (phba->sli_rev == LPFC_SLI_REV4) {
+               shost->nr_hw_queues = phba->cfg_hdw_queue;
                shost->dma_boundary =
                        phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
                shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
-       }
+       } else
+               /* SLI-3 has a limited number of hardware queues (3),
+                * thus there is only one for FCP processing.
+                */
+               shost->nr_hw_queues = 1;
 
        /*
         * Set initial can_queue value since 0 is no longer supported and
@@ -4220,7 +4326,8 @@ lpfc_stop_port_s4(struct lpfc_hba *phba)
 {
        /* Reset some HBA SLI4 setup states */
        lpfc_stop_hba_timers(phba);
-       phba->pport->work_port_events = 0;
+       if (phba->pport)
+               phba->pport->work_port_events = 0;
        phba->sli4_hba.intr_enable = 0;
 }
 
@@ -5819,24 +5926,11 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
                                "NVME" : " "),
                        (phba->nvmet_support ? "NVMET" : " "));
 
-       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
-               /* Initialize the scsi buffer list used by driver for scsi IO */
-               spin_lock_init(&phba->scsi_buf_list_get_lock);
-               INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
-               spin_lock_init(&phba->scsi_buf_list_put_lock);
-               INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-       }
-
-       if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
-               (phba->nvmet_support == 0)) {
-               /* Initialize the NVME buffer list used by driver for NVME IO */
-               spin_lock_init(&phba->nvme_buf_list_get_lock);
-               INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
-               phba->get_nvme_bufs = 0;
-               spin_lock_init(&phba->nvme_buf_list_put_lock);
-               INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
-               phba->put_nvme_bufs = 0;
-       }
+       /* Initialize the IO buffer list used by driver for SLI3 SCSI */
+       spin_lock_init(&phba->scsi_buf_list_get_lock);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
+       spin_lock_init(&phba->scsi_buf_list_put_lock);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
 
        /* Initialize the fabric iocb list */
        INIT_LIST_HEAD(&phba->fabric_iocb_list);
@@ -5877,7 +5971,7 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
 static int
 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 {
-       int rc;
+       int rc, entry_sz;
 
        /*
         * Initialize timers used by driver
@@ -5922,6 +6016,11 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
        lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
        lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
 
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               entry_sz = sizeof(struct sli4_sge);
+       else
+               entry_sz = sizeof(struct ulp_bde64);
+
        /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
        if (phba->cfg_enable_bg) {
                /*
@@ -5935,7 +6034,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
                 */
                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
                        sizeof(struct fcp_rsp) +
-                       (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
+                       (LPFC_MAX_SG_SEG_CNT * entry_sz);
 
                if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
                        phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
@@ -5950,7 +6049,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
                 */
                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
                        sizeof(struct fcp_rsp) +
-                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+                       ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
 
                /* Total BDEs in BPL for scsi_sg_list */
                phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
@@ -6031,7 +6130,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
        struct lpfc_mqe *mqe;
        int longs;
-       int fof_vectors = 0;
        int extra;
        uint64_t wwn;
        u32 if_type;
@@ -6200,8 +6298,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                /* Initialize the Abort nvme buffer list used by driver */
-               spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
-               INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+               spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
        }
@@ -6405,8 +6502,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
        /* Verify OAS is supported */
        lpfc_sli4_oas_verify(phba);
-       if (phba->cfg_fof)
-               fof_vectors = 1;
 
        /* Verify RAS support on adapter */
        lpfc_sli4_ras_init(phba);
@@ -6450,9 +6545,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                goto out_remove_rpi_hdrs;
        }
 
-       phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
-                                               sizeof(struct lpfc_hba_eq_hdl),
-                                               GFP_KERNEL);
+       phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_hdw_queue,
+                                           sizeof(struct lpfc_hba_eq_hdl),
+                                           GFP_KERNEL);
        if (!phba->sli4_hba.hba_eq_hdl) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2572 Failed allocate memory for "
@@ -6875,11 +6970,8 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
        /* els xri-sgl book keeping */
        phba->sli4_hba.els_xri_cnt = 0;
 
-       /* scsi xri-buffer book keeping */
-       phba->sli4_hba.scsi_xri_cnt = 0;
-
        /* nvme xri-buffer book keeping */
-       phba->sli4_hba.nvme_xri_cnt = 0;
+       phba->sli4_hba.io_xri_cnt = 0;
 }
 
 /**
@@ -7093,6 +7185,9 @@ lpfc_hba_alloc(struct pci_dev *pdev)
 static void
 lpfc_hba_free(struct lpfc_hba *phba)
 {
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               kfree(phba->sli4_hba.hdwq);
+
        /* Release the driver assigned board number */
        idr_remove(&lpfc_hba_index, phba->brd_no);
 
@@ -7128,10 +7223,6 @@ lpfc_create_shost(struct lpfc_hba *phba)
        phba->fc_arbtov = FF_DEF_ARBTOV;
 
        atomic_set(&phba->sdev_cnt, 0);
-       atomic_set(&phba->fc4ScsiInputRequests, 0);
-       atomic_set(&phba->fc4ScsiOutputRequests, 0);
-       atomic_set(&phba->fc4ScsiControlRequests, 0);
-       atomic_set(&phba->fc4ScsiIoCmpls, 0);
        vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
        if (!vport)
                return -ENODEV;
@@ -8023,25 +8114,27 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                        /*
                         * Whats left after this can go toward NVME.
                         * The minus 6 accounts for ELS, NVME LS, MBOX
-                        * fof plus a couple extra. When configured for
+                        * plus a couple extra. When configured for
                         * NVMET, FCP io channel WQs are not created.
                         */
                        length -= 6;
+
+                       /* Take off FCP queues */
                        if (!phba->nvmet_support)
-                               length -= phba->cfg_fcp_io_channel;
+                               length -= phba->cfg_hdw_queue;
 
-                       if (phba->cfg_nvme_io_channel > length) {
+                       /* Check to see if there is enough for NVME */
+                       if (phba->cfg_hdw_queue > length) {
                                lpfc_printf_log(
                                        phba, KERN_ERR, LOG_SLI,
                                        "2005 Reducing NVME IO channel to %d: "
-                                       "WQ %d CQ %d NVMEIO %d FCPIO %d\n",
+                                       "WQ %d CQ %d CommonIO %d\n",
                                        length,
                                        phba->sli4_hba.max_cfg_param.max_wq,
                                        phba->sli4_hba.max_cfg_param.max_cq,
-                                       phba->cfg_nvme_io_channel,
-                                       phba->cfg_fcp_io_channel);
+                                       phba->cfg_hdw_queue);
 
-                               phba->cfg_nvme_io_channel = length;
+                               phba->cfg_hdw_queue = length;
                        }
                }
        }
@@ -8254,53 +8347,30 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
 static int
 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 {
-       int io_channel;
-       int fof_vectors = phba->cfg_fof ? 1 : 0;
-
        /*
         * Sanity check for configured queue parameters against the run-time
         * device parameters
         */
 
-       /* Sanity check on HBA EQ parameters */
-       io_channel = phba->io_channel_irqs;
-
-       if (phba->sli4_hba.num_online_cpu < io_channel) {
-               lpfc_printf_log(phba,
-                               KERN_ERR, LOG_INIT,
-                               "3188 Reducing IO channels to match number of "
-                               "online CPUs: from %d to %d\n",
-                               io_channel, phba->sli4_hba.num_online_cpu);
-               io_channel = phba->sli4_hba.num_online_cpu;
-       }
-
-       if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
+       if (phba->cfg_hdw_queue > phba->sli4_hba.max_cfg_param.max_eq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2575 Reducing IO channels to match number of "
                                "available EQs: from %d to %d\n",
-                               io_channel,
+                               phba->cfg_hdw_queue,
                                phba->sli4_hba.max_cfg_param.max_eq);
-               io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
+               phba->cfg_hdw_queue = phba->sli4_hba.max_cfg_param.max_eq;
        }
 
-       /* The actual number of FCP / NVME event queues adopted */
-       if (io_channel != phba->io_channel_irqs)
-               phba->io_channel_irqs = io_channel;
-       if (phba->cfg_fcp_io_channel > io_channel)
-               phba->cfg_fcp_io_channel = io_channel;
-       if (phba->cfg_nvme_io_channel > io_channel)
-               phba->cfg_nvme_io_channel = io_channel;
        if (phba->nvmet_support) {
-               if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
-                       phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+               if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
+                       phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
        }
        if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
                phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
 
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                       "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
-                       phba->io_channel_irqs, phba->cfg_fcp_io_channel,
-                       phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
+                       "2574 IO channels: hdwQ %d MRQ: %d\n",
+                       phba->cfg_hdw_queue, phba->cfg_nvmet_mrq);
 
        /* Get EQ depth from module parameter, fake the default for now */
        phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
@@ -8327,7 +8397,8 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
                return 1;
        }
        qdesc->qe_valid = 1;
-       phba->sli4_hba.nvme_cq[wqidx] = qdesc;
+       qdesc->hdwq = wqidx;
+       phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
 
        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
                                      LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
@@ -8337,7 +8408,8 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
                                wqidx);
                return 1;
        }
-       phba->sli4_hba.nvme_wq[wqidx] = qdesc;
+       qdesc->hdwq = wqidx;
+       phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc;
        list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
        return 0;
 }
@@ -8365,7 +8437,8 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
                return 1;
        }
        qdesc->qe_valid = 1;
-       phba->sli4_hba.fcp_cq[wqidx] = qdesc;
+       qdesc->hdwq = wqidx;
+       phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc;
 
        /* Create Fast Path FCP WQs */
        if (phba->enab_exp_wqcq_pages) {
@@ -8386,7 +8459,8 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
                                wqidx);
                return 1;
        }
-       phba->sli4_hba.fcp_wq[wqidx] = qdesc;
+       qdesc->hdwq = wqidx;
+       phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc;
        list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
        return 0;
 }
@@ -8409,16 +8483,13 @@ int
 lpfc_sli4_queue_create(struct lpfc_hba *phba)
 {
        struct lpfc_queue *qdesc;
-       int idx, io_channel;
+       int idx;
+       struct lpfc_sli4_hdw_queue *qp;
 
        /*
         * Create HBA Record arrays.
         * Both NVME and FCP will share that same vectors / EQs
         */
-       io_channel = phba->io_channel_irqs;
-       if (!io_channel)
-               return -ERANGE;
-
        phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
        phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
        phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
@@ -8430,87 +8501,36 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
        phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
 
-       phba->sli4_hba.hba_eq =  kcalloc(io_channel,
-                                       sizeof(struct lpfc_queue *),
-                                       GFP_KERNEL);
-       if (!phba->sli4_hba.hba_eq) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                       "2576 Failed allocate memory for "
-                       "fast-path EQ record array\n");
-               goto out_error;
-       }
-
-       if (phba->cfg_fcp_io_channel) {
-               phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
-                                               sizeof(struct lpfc_queue *),
-                                               GFP_KERNEL);
-               if (!phba->sli4_hba.fcp_cq) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2577 Failed allocate memory for "
-                                       "fast-path CQ record array\n");
-                       goto out_error;
-               }
-               phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
-                                               sizeof(struct lpfc_queue *),
-                                               GFP_KERNEL);
-               if (!phba->sli4_hba.fcp_wq) {
+       if (!phba->sli4_hba.hdwq) {
+               phba->sli4_hba.hdwq = kcalloc(
+                       phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
+                       GFP_KERNEL);
+               if (!phba->sli4_hba.hdwq) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2578 Failed allocate memory for "
-                                       "fast-path FCP WQ record array\n");
+                                       "6427 Failed allocate memory for "
+                                       "fast-path Hardware Queue array\n");
                        goto out_error;
                }
-               /*
-                * Since the first EQ can have multiple CQs associated with it,
-                * this array is used to quickly see if we have a FCP fast-path
-                * CQ match.
-                */
-               phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
-                                                       sizeof(uint16_t),
-                                                       GFP_KERNEL);
-               if (!phba->sli4_hba.fcp_cq_map) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2545 Failed allocate memory for "
-                                       "fast-path CQ map\n");
-                       goto out_error;
+               /* Prepare hardware queues to take IO buffers */
+               for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+                       qp = &phba->sli4_hba.hdwq[idx];
+                       spin_lock_init(&qp->io_buf_list_get_lock);
+                       spin_lock_init(&qp->io_buf_list_put_lock);
+                       INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
+                       INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
+                       qp->get_io_bufs = 0;
+                       qp->put_io_bufs = 0;
+                       qp->total_io_bufs = 0;
+                       spin_lock_init(&qp->abts_scsi_buf_list_lock);
+                       INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list);
+                       qp->abts_scsi_io_bufs = 0;
+                       spin_lock_init(&qp->abts_nvme_buf_list_lock);
+                       INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list);
+                       qp->abts_nvme_io_bufs = 0;
                }
        }
 
-       if (phba->cfg_nvme_io_channel) {
-               phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
-                                               sizeof(struct lpfc_queue *),
-                                               GFP_KERNEL);
-               if (!phba->sli4_hba.nvme_cq) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "6077 Failed allocate memory for "
-                                       "fast-path CQ record array\n");
-                       goto out_error;
-               }
-
-               phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
-                                               sizeof(struct lpfc_queue *),
-                                               GFP_KERNEL);
-               if (!phba->sli4_hba.nvme_wq) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2581 Failed allocate memory for "
-                                       "fast-path NVME WQ record array\n");
-                       goto out_error;
-               }
-
-               /*
-                * Since the first EQ can have multiple CQs associated with it,
-                * this array is used to quickly see if we have a NVME fast-path
-                * CQ match.
-                */
-               phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
-                                                       sizeof(uint16_t),
-                                                       GFP_KERNEL);
-               if (!phba->sli4_hba.nvme_cq_map) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "6078 Failed allocate memory for "
-                                       "fast-path CQ map\n");
-                       goto out_error;
-               }
-
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                if (phba->nvmet_support) {
                        phba->sli4_hba.nvmet_cqset = kcalloc(
                                        phba->cfg_nvmet_mrq,
@@ -8548,7 +8568,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
 
        /* Create HBA Event Queues (EQs) */
-       for (idx = 0; idx < io_channel; idx++) {
+       for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
                /* Create EQs */
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                              phba->sli4_hba.eq_esize,
@@ -8559,33 +8579,40 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                        goto out_error;
                }
                qdesc->qe_valid = 1;
-               phba->sli4_hba.hba_eq[idx] = qdesc;
+               qdesc->hdwq = idx;
+               phba->sli4_hba.hdwq[idx].hba_eq = qdesc;
        }
 
-       /* FCP and NVME io channels are not required to be balanced */
 
-       for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+       /* Allocate SCSI SLI4 CQ/WQs */
+       for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
                if (lpfc_alloc_fcp_wq_cq(phba, idx))
                        goto out_error;
 
-       for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
-               if (lpfc_alloc_nvme_wq_cq(phba, idx))
-                       goto out_error;
+       /* Allocate NVME SLI4 CQ/WQs */
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
+                       if (lpfc_alloc_nvme_wq_cq(phba, idx))
+                               goto out_error;
 
-       if (phba->nvmet_support) {
-               for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
-                       qdesc = lpfc_sli4_queue_alloc(phba,
+               if (phba->nvmet_support) {
+                       for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+                               qdesc = lpfc_sli4_queue_alloc(
+                                                     phba,
                                                      LPFC_DEFAULT_PAGE_SIZE,
                                                      phba->sli4_hba.cq_esize,
                                                      phba->sli4_hba.cq_ecount);
-                       if (!qdesc) {
-                               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "3142 Failed allocate NVME "
-                                       "CQ Set (%d)\n", idx);
-                               goto out_error;
+                               if (!qdesc) {
+                                       lpfc_printf_log(
+                                               phba, KERN_ERR, LOG_INIT,
+                                               "3142 Failed allocate NVME "
+                                               "CQ Set (%d)\n", idx);
+                                       goto out_error;
+                               }
+                               qdesc->qe_valid = 1;
+                               qdesc->hdwq = idx;
+                               phba->sli4_hba.nvmet_cqset[idx] = qdesc;
                        }
-                       qdesc->qe_valid = 1;
-                       phba->sli4_hba.nvmet_cqset[idx] = qdesc;
                }
        }
 
@@ -8702,7 +8729,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        }
        phba->sli4_hba.dat_rq = qdesc;
 
-       if (phba->nvmet_support) {
+       if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
+           phba->nvmet_support) {
                for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
                        /* Create NVMET Receive Queue for header */
                        qdesc = lpfc_sli4_queue_alloc(phba,
@@ -8715,6 +8743,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                                                "receive HRQ\n");
                                goto out_error;
                        }
+                       qdesc->hdwq = idx;
                        phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
 
                        /* Only needed for header of RQ pair */
@@ -8741,13 +8770,29 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                                                "receive DRQ\n");
                                goto out_error;
                        }
+                       qdesc->hdwq = idx;
                        phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
                }
        }
 
-       /* Create the Queues needed for Flash Optimized Fabric operations */
-       if (phba->cfg_fof)
-               lpfc_fof_queue_create(phba);
+#if defined(BUILD_NVME)
+       /* Clear NVME stats */
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+                       memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
+                              sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
+               }
+       }
+#endif
+
+       /* Clear SCSI stats */
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+               for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+                       memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
+                              sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
+               }
+       }
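
Clearing the cstat blocks with sizeof on the member keeps the memset correct even if the statistics structures later grow. The counters are presumably fed on the completion path; a hypothetical consumer of the SCSI block might look like this (io_cmpls is an assumed member name, not taken from this patch):

	/* Hypothetical: how a completion handler might bump the per-hdwq
	 * SCSI stats cleared above. The io_cmpls member is assumed.
	 */
	static inline void lpfc_scsi_cstat_bump(struct lpfc_sli4_hdw_queue *qp)
	{
		qp->scsi_cstat.io_cmpls++;	/* assumed counter member */
	}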
+
        return 0;
 
 out_error:
@@ -8780,11 +8825,21 @@ lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
 }
 
 static inline void
-lpfc_sli4_release_queue_map(uint16_t **qmap)
+lpfc_sli4_release_hdwq(struct lpfc_sli4_hdw_queue *hdwq, int max)
 {
-       if (*qmap != NULL) {
-               kfree(*qmap);
-               *qmap = NULL;
+       uint32_t idx;
+
+       for (idx = 0; idx < max; idx++) {
+               lpfc_sli4_queue_free(hdwq[idx].hba_eq);
+               lpfc_sli4_queue_free(hdwq[idx].fcp_cq);
+               lpfc_sli4_queue_free(hdwq[idx].nvme_cq);
+               lpfc_sli4_queue_free(hdwq[idx].fcp_wq);
+               lpfc_sli4_queue_free(hdwq[idx].nvme_wq);
+               hdwq[idx].hba_eq = NULL;
+               hdwq[idx].fcp_cq = NULL;
+               hdwq[idx].nvme_cq = NULL;
+               hdwq[idx].fcp_wq = NULL;
+               hdwq[idx].nvme_wq = NULL;
        }
 }
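
lpfc_sli4_release_hdwq() frees every member unconditionally, including queues that may never have been allocated (for instance nvme_cq on an FCP-only configuration). That only works if the free routine tolerates NULL; a sketch of that assumed guard:

	/* Sketch, assuming the existing free routine begins with a NULL
	 * check; that is what lets lpfc_sli4_release_hdwq() free every
	 * member unconditionally, even ones never allocated.
	 */
	void lpfc_sli4_queue_free(struct lpfc_queue *queue)
	{
		if (!queue)
			return;
		/* ... free pages and list entries, then the queue ... */
	}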
 
@@ -8803,33 +8858,10 @@ lpfc_sli4_release_queue_map(uint16_t **qmap)
 void
 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 {
-       if (phba->cfg_fof)
-               lpfc_fof_queue_destroy(phba);
-
        /* Release HBA eqs */
-       lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
-
-       /* Release FCP cqs */
-       lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
-                                phba->cfg_fcp_io_channel);
-
-       /* Release FCP wqs */
-       lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
-                                phba->cfg_fcp_io_channel);
-
-       /* Release FCP CQ mapping array */
-       lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
-
-       /* Release NVME cqs */
-       lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
-                                       phba->cfg_nvme_io_channel);
-
-       /* Release NVME wqs */
-       lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
-                                       phba->cfg_nvme_io_channel);
-
-       /* Release NVME CQ mapping array */
-       lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
+       if (phba->sli4_hba.hdwq)
+               lpfc_sli4_release_hdwq(phba->sli4_hba.hdwq,
+                                      phba->cfg_hdw_queue);
 
        if (phba->nvmet_support) {
                lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
@@ -8913,7 +8945,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
        cq->chann = qidx;
 
        if (qtype != LPFC_MBOX) {
-               /* Setup nvme_cq_map for fast lookup */
+               /* Setup cq_map for fast lookup */
                if (cq_map)
                        *cq_map = cq->queue_id;
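
Recording the CQ id in cq_map gives completion handling a cheap identifier to match against instead of dereferencing queue structures. A hedged illustration of how a lookup over the per-hdwq maps could use it; lpfc_hdwq_from_cqid() is hypothetical and only demonstrates the idea:

	/* Hypothetical helper: route a CQ id from an event entry to its
	 * completion queue via the saved per-hdwq cq_map ids.
	 */
	static struct lpfc_queue *
	lpfc_hdwq_from_cqid(struct lpfc_hba *phba, uint16_t cqid)
	{
		int idx;

		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			if (phba->sli4_hba.hdwq[idx].fcp_cq_map == cqid)
				return phba->sli4_hba.hdwq[idx].fcp_cq;
			if (phba->sli4_hba.hdwq[idx].nvme_cq_map == cqid)
				return phba->sli4_hba.hdwq[idx].nvme_cq;
		}
		return NULL;
	}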
 
@@ -8976,9 +9008,10 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 {
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
+       struct lpfc_sli4_hdw_queue *qp;
        LPFC_MBOXQ_t *mboxq;
        int qidx;
-       uint32_t length, io_channel;
+       uint32_t length;
        int rc = -ENOMEM;
 
        /* Check for dual-ULP support */
@@ -9029,25 +9062,25 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        /*
         * Set up HBA Event Queues (EQs)
         */
-       io_channel = phba->io_channel_irqs;
+       qp = phba->sli4_hba.hdwq;
 
        /* Set up HBA event queue */
-       if (io_channel && !phba->sli4_hba.hba_eq) {
+       if (!qp) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3147 Fast-path EQs not allocated\n");
                rc = -ENOMEM;
                goto out_error;
        }
-       for (qidx = 0; qidx < io_channel; qidx++) {
-               if (!phba->sli4_hba.hba_eq[qidx]) {
+       for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+               if (!qp[qidx].hba_eq) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0522 Fast-path EQ (%d) not "
                                        "allocated\n", qidx);
                        rc = -ENOMEM;
                        goto out_destroy;
                }
-               rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
-                                               phba->cfg_fcp_imax);
+               rc = lpfc_eq_create(phba, qp[qidx].hba_eq,
+                                   phba->cfg_fcp_imax);
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0523 Failed setup of fast-path EQ "
@@ -9056,26 +9089,17 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        goto out_destroy;
                }
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "2584 HBA EQ setup: queue[%d]-id=%d\n",
-                               qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
+                               "2584 HBA EQ setup: queue[%d]-id=%d\n", qidx,
+                               qp[qidx].hba_eq->queue_id);
        }
 
-       if (phba->cfg_nvme_io_channel) {
-               if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "6084 Fast-path NVME %s array not allocated\n",
-                               (phba->sli4_hba.nvme_cq) ? "CQ" : "WQ");
-                       rc = -ENOMEM;
-                       goto out_destroy;
-               }
-
-               for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
                        rc = lpfc_create_wq_cq(phba,
-                                       phba->sli4_hba.hba_eq[
-                                               qidx % io_channel],
-                                       phba->sli4_hba.nvme_cq[qidx],
-                                       phba->sli4_hba.nvme_wq[qidx],
-                                       &phba->sli4_hba.nvme_cq_map[qidx],
+                                       qp[qidx].hba_eq,
+                                       qp[qidx].nvme_cq,
+                                       qp[qidx].nvme_wq,
+                                       &phba->sli4_hba.hdwq[qidx].nvme_cq_map,
                                        qidx, LPFC_NVME);
                        if (rc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9087,31 +9111,19 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                }
        }
 
-       if (phba->cfg_fcp_io_channel) {
-               /* Set up fast-path FCP Response Complete Queue */
-               if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
+       for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+               rc = lpfc_create_wq_cq(phba,
+                                      qp[qidx].hba_eq,
+                                      qp[qidx].fcp_cq,
+                                      qp[qidx].fcp_wq,
+                                      &phba->sli4_hba.hdwq[qidx].fcp_cq_map,
+                                      qidx, LPFC_FCP);
+               if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "3148 Fast-path FCP %s array not allocated\n",
-                               phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
-                       rc = -ENOMEM;
-                       goto out_destroy;
-               }
-
-               for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
-                       rc = lpfc_create_wq_cq(phba,
-                                       phba->sli4_hba.hba_eq[
-                                               qidx % io_channel],
-                                       phba->sli4_hba.fcp_cq[qidx],
-                                       phba->sli4_hba.fcp_wq[qidx],
-                                       &phba->sli4_hba.fcp_cq_map[qidx],
-                                       qidx, LPFC_FCP);
-                       if (rc) {
-                               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0535 Failed to setup fastpath "
                                        "FCP WQ/CQ (%d), rc = 0x%x\n",
                                        qidx, (uint32_t)rc);
-                               goto out_destroy;
-                       }
+                       goto out_destroy;
                }
        }
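
This hunk carries the central change of the setup path: the old code spread CQs across EQs with a modulo, while the new code binds each hardware queue's CQ/WQ pair to that queue's own EQ. Side by side, both lines taken from this diff:

	/* Before: CQs round-robined over however many EQ irqs existed. */
	eq = phba->sli4_hba.hba_eq[qidx % io_channel];
	/* After: each hardware queue owns its event queue outright. */
	eq = qp[qidx].hba_eq;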
 
@@ -9130,7 +9142,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                goto out_destroy;
        }
 
-       rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
+       rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
                               phba->sli4_hba.mbx_cq,
                               phba->sli4_hba.mbx_wq,
                               NULL, 0, LPFC_MBOX);
@@ -9151,7 +9163,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                if (phba->cfg_nvmet_mrq > 1) {
                        rc = lpfc_cq_create_set(phba,
                                        phba->sli4_hba.nvmet_cqset,
-                                       phba->sli4_hba.hba_eq,
+                                       qp,
                                        LPFC_WCQ, LPFC_NVMET);
                        if (rc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9163,7 +9175,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                } else {
                        /* Set up NVMET Receive Complete Queue */
                        rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
-                                           phba->sli4_hba.hba_eq[0],
+                                           qp[0].hba_eq,
                                            LPFC_WCQ, LPFC_NVMET);
                        if (rc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9177,7 +9189,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                                        "6090 NVMET CQ setup: cq-id=%d, "
                                        "parent eq-id=%d\n",
                                        phba->sli4_hba.nvmet_cqset[0]->queue_id,
-                                       phba->sli4_hba.hba_eq[0]->queue_id);
+                                       qp[0].hba_eq->queue_id);
                }
        }
 
@@ -9189,14 +9201,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                rc = -ENOMEM;
                goto out_destroy;
        }
-       rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
-                                       phba->sli4_hba.els_cq,
-                                       phba->sli4_hba.els_wq,
-                                       NULL, 0, LPFC_ELS);
+       rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
+                              phba->sli4_hba.els_cq,
+                              phba->sli4_hba.els_wq,
+                              NULL, 0, LPFC_ELS);
        if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                       "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
-                       (uint32_t)rc);
+                               "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
+                               (uint32_t)rc);
                goto out_destroy;
        }
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -9204,7 +9216,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        phba->sli4_hba.els_wq->queue_id,
                        phba->sli4_hba.els_cq->queue_id);
 
-       if (phba->cfg_nvme_io_channel) {
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                /* Set up NVME LS Complete Queue */
                if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9213,14 +9225,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        rc = -ENOMEM;
                        goto out_destroy;
                }
-               rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
-                                       phba->sli4_hba.nvmels_cq,
-                                       phba->sli4_hba.nvmels_wq,
-                                       NULL, 0, LPFC_NVME_LS);
+               rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
+                                      phba->sli4_hba.nvmels_cq,
+                                      phba->sli4_hba.nvmels_wq,
+                                      NULL, 0, LPFC_NVME_LS);
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0529 Failed setup of NVVME LS WQ/CQ: "
-                               "rc = 0x%x\n", (uint32_t)rc);
+                                       "0526 Failed setup of NVME LS WQ/CQ: "
+                                       "rc = 0x%x\n", (uint32_t)rc);
                        goto out_destroy;
                }
 
@@ -9306,17 +9318,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        phba->sli4_hba.dat_rq->queue_id,
                        phba->sli4_hba.els_cq->queue_id);
 
-       if (phba->cfg_fof) {
-               rc = lpfc_fof_queue_setup(phba);
-               if (rc) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "0549 Failed setup of FOF Queues: "
-                                       "rc = 0x%x\n", rc);
-                       goto out_destroy;
-               }
-       }
-
-       for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
+       for (qidx = 0; qidx < phba->cfg_hdw_queue;
+            qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
                lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
                                         phba->cfg_fcp_imax);
 
@@ -9343,12 +9346,9 @@ out_error:
 void
 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 {
+       struct lpfc_sli4_hdw_queue *qp;
        int qidx;
 
-       /* Unset the queues created for Flash Optimized Fabric operations */
-       if (phba->cfg_fof)
-               lpfc_fof_queue_destroy(phba);
-
        /* Unset mailbox command work queue */
        if (phba->sli4_hba.mbx_wq)
                lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
@@ -9366,17 +9366,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
                lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
                                phba->sli4_hba.dat_rq);
 
-       /* Unset FCP work queue */
-       if (phba->sli4_hba.fcp_wq)
-               for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
-                       lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);
-
-       /* Unset NVME work queue */
-       if (phba->sli4_hba.nvme_wq) {
-               for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
-                       lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
-       }
-
        /* Unset mailbox command complete queue */
        if (phba->sli4_hba.mbx_cq)
                lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
@@ -9389,11 +9378,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
        if (phba->sli4_hba.nvmels_cq)
                lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
 
-       /* Unset NVME response complete queue */
-       if (phba->sli4_hba.nvme_cq)
-               for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
-                       lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
-
        if (phba->nvmet_support) {
                /* Unset NVMET MRQ queue */
                if (phba->sli4_hba.nvmet_mrq_hdr) {
@@ -9412,15 +9396,17 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
                }
        }
 
-       /* Unset FCP response complete queue */
-       if (phba->sli4_hba.fcp_cq)
-               for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
-                       lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);
-
-       /* Unset fast-path event queue */
-       if (phba->sli4_hba.hba_eq)
-               for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
-                       lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
+       /* Unset fast-path SLI4 queues */
+       if (phba->sli4_hba.hdwq) {
+               for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+                       qp = &phba->sli4_hba.hdwq[qidx];
+                       lpfc_wq_destroy(phba, qp->fcp_wq);
+                       lpfc_wq_destroy(phba, qp->nvme_wq);
+                       lpfc_cq_destroy(phba, qp->fcp_cq);
+                       lpfc_cq_destroy(phba, qp->nvme_cq);
+                       lpfc_eq_destroy(phba, qp->hba_eq);
+               }
+       }
 }
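
The destroy order inside the loop above is deliberate: work queues go first, then completion queues, then the event queue, so nothing is torn down while a dependent child still references it. Restated with that parent/child reading made explicit (the dependency rationale is an inference, not stated in the patch):

	lpfc_wq_destroy(phba, qp->fcp_wq);	/* leaves: work queues */
	lpfc_cq_destroy(phba, qp->fcp_cq);	/* middle: completion queues */
	lpfc_eq_destroy(phba, qp->hba_eq);	/* root: the event queue */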
 
 /**
@@ -10246,7 +10232,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
                if (vec >= vectors)
                        vec = 0;
                index++;
-               if (index >= phba->cfg_fcp_io_channel)
+               if (index >= phba->cfg_hdw_queue)
                        index = 0;
                cpup++;
        }
@@ -10271,9 +10257,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
        char *name;
 
        /* Set up MSI-X multi-message vectors */
-       vectors = phba->io_channel_irqs;
-       if (phba->cfg_fof)
-               vectors++;
+       vectors = phba->cfg_hdw_queue;
 
        rc = pci_alloc_irq_vectors(phba->pcidev,
                                (phba->nvmet_support) ? 1 : 2,
@@ -10295,16 +10279,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
                phba->sli4_hba.hba_eq_hdl[index].idx = index;
                phba->sli4_hba.hba_eq_hdl[index].phba = phba;
                atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
-               if (phba->cfg_fof && (index == (vectors - 1)))
-                       rc = request_irq(pci_irq_vector(phba->pcidev, index),
-                                &lpfc_sli4_fof_intr_handler, 0,
-                                name,
-                                &phba->sli4_hba.hba_eq_hdl[index]);
-               else
-                       rc = request_irq(pci_irq_vector(phba->pcidev, index),
-                                &lpfc_sli4_hba_intr_handler, 0,
-                                name,
-                                &phba->sli4_hba.hba_eq_hdl[index]);
+               rc = request_irq(pci_irq_vector(phba->pcidev, index),
+                        &lpfc_sli4_hba_intr_handler, 0,
+                        name,
+                        &phba->sli4_hba.hba_eq_hdl[index]);
                if (rc) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                        "0486 MSI-X fast-path (%d) "
@@ -10313,22 +10291,15 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
                }
        }
 
-       if (phba->cfg_fof)
-               vectors--;
-
-       if (vectors != phba->io_channel_irqs) {
+       if (vectors != phba->cfg_hdw_queue) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3238 Reducing IO channels to match number of "
                                "MSI-X vectors, requested %d got %d\n",
-                               phba->io_channel_irqs, vectors);
-               if (phba->cfg_fcp_io_channel > vectors)
-                       phba->cfg_fcp_io_channel = vectors;
-               if (phba->cfg_nvme_io_channel > vectors)
-                       phba->cfg_nvme_io_channel = vectors;
-               if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
-                       phba->io_channel_irqs = phba->cfg_fcp_io_channel;
-               else
-                       phba->io_channel_irqs = phba->cfg_nvme_io_channel;
+                               phba->cfg_hdw_queue, vectors);
+               if (phba->cfg_hdw_queue > vectors)
+                       phba->cfg_hdw_queue = vectors;
+               if (phba->cfg_nvmet_mrq > vectors)
+                       phba->cfg_nvmet_mrq = vectors;
        }
        lpfc_cpu_affinity_check(phba, vectors);
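
pci_alloc_irq_vectors() may grant anywhere between its minimum (1 for nvmet, else 2) and the cfg_hdw_queue maximum, so the block above clamps the queue counts to what was actually granted. A standalone sketch of that contract; the flags argument is an assumption, since the earlier hunk cuts off before it:

	int vectors;

	/* Ask for one vector per hardware queue; accept fewer. The
	 * PCI_IRQ_* flags here are assumed, not visible in this diff.
	 */
	vectors = pci_alloc_irq_vectors(phba->pcidev,
					phba->nvmet_support ? 1 : 2,
					phba->cfg_hdw_queue,
					PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (vectors < 0)
		return vectors;		/* no MSI-X; caller falls back */
	if (phba->cfg_hdw_queue > vectors)
		phba->cfg_hdw_queue = vectors;	/* no more queues than irqs */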
 
@@ -10385,15 +10356,11 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
                return rc;
        }
 
-       for (index = 0; index < phba->io_channel_irqs; index++) {
+       for (index = 0; index < phba->cfg_hdw_queue; index++) {
                phba->sli4_hba.hba_eq_hdl[index].idx = index;
                phba->sli4_hba.hba_eq_hdl[index].phba = phba;
        }
 
-       if (phba->cfg_fof) {
-               phba->sli4_hba.hba_eq_hdl[index].idx = index;
-               phba->sli4_hba.hba_eq_hdl[index].phba = phba;
-       }
        return 0;
 }
 
@@ -10454,13 +10421,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
                        phba->intr_type = INTx;
                        intr_mode = 0;
 
-                       for (idx = 0; idx < phba->io_channel_irqs; idx++) {
-                               eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
-                               eqhdl->idx = idx;
-                               eqhdl->phba = phba;
-                               atomic_set(&eqhdl->hba_eq_in_use, 1);
-                       }
-                       if (phba->cfg_fof) {
+                       for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
                                eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
                                eqhdl->idx = idx;
                                eqhdl->phba = phba;
@@ -10488,11 +10449,7 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
                int index;
 
                /* Free up MSI-X multi-message vectors */
-               for (index = 0; index < phba->io_channel_irqs; index++)
-                       free_irq(pci_irq_vector(phba->pcidev, index),
-                                       &phba->sli4_hba.hba_eq_hdl[index]);
-
-               if (phba->cfg_fof)
+               for (index = 0; index < phba->cfg_hdw_queue; index++)
                        free_irq(pci_irq_vector(phba->pcidev, index),
                                        &phba->sli4_hba.hba_eq_hdl[index]);
        } else {
@@ -10555,8 +10512,10 @@ lpfc_unset_hba(struct lpfc_hba *phba)
 static void
 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
 {
+       struct lpfc_sli4_hdw_queue *qp;
+       int idx, ccnt, fcnt;
        int wait_time = 0;
-       int nvme_xri_cmpl = 1;
+       int io_xri_cmpl = 1;
        int nvmet_xri_cmpl = 1;
        int fcp_xri_cmpl = 1;
        int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
@@ -10571,17 +10530,32 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
                lpfc_nvme_wait_for_io_drain(phba);
 
-       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
-               fcp_xri_cmpl =
-                       list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+       ccnt = 0;
+       fcnt = 0;
+       for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+               qp = &phba->sli4_hba.hdwq[idx];
+               fcp_xri_cmpl = list_empty(
+                       &qp->lpfc_abts_scsi_buf_list);
+               if (!fcp_xri_cmpl) /* if list is NOT empty */
+                       fcnt++;
+               if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+                       io_xri_cmpl = list_empty(
+                               &qp->lpfc_abts_nvme_buf_list);
+                       if (!io_xri_cmpl) /* if list is NOT empty */
+                               ccnt++;
+               }
+       }
+       if (ccnt)
+               io_xri_cmpl = 0;
+       if (fcnt)
+               fcp_xri_cmpl = 0;
+
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-               nvme_xri_cmpl =
-                       list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
                nvmet_xri_cmpl =
                        list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
        }
 
-       while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
+       while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl ||
               !nvmet_xri_cmpl) {
                if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
                        if (!nvmet_xri_cmpl)
@@ -10589,7 +10563,7 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
                                                "6424 NVMET XRI exchange busy "
                                                "wait time: %d seconds.\n",
                                                wait_time/1000);
-                       if (!nvme_xri_cmpl)
+                       if (!io_xri_cmpl)
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                                "6100 NVME XRI exchange busy "
                                                "wait time: %d seconds.\n",
@@ -10610,17 +10584,31 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
                        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
                        wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
                }
+
+               ccnt = 0;
+               fcnt = 0;
+               for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+                       qp = &phba->sli4_hba.hdwq[idx];
+                       fcp_xri_cmpl = list_empty(
+                               &qp->lpfc_abts_scsi_buf_list);
+                       if (!fcp_xri_cmpl) /* if list is NOT empty */
+                               fcnt++;
+                       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+                               io_xri_cmpl = list_empty(
+                                   &qp->lpfc_abts_nvme_buf_list);
+                               if (!io_xri_cmpl) /* if list is NOT empty */
+                                       ccnt++;
+                       }
+               }
+               if (ccnt)
+                       io_xri_cmpl = 0;
+               if (fcnt)
+                       fcp_xri_cmpl = 0;
+
                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-                       nvme_xri_cmpl = list_empty(
-                               &phba->sli4_hba.lpfc_abts_nvme_buf_list);
                        nvmet_xri_cmpl = list_empty(
                                &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
                }
-
-               if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
-                       fcp_xri_cmpl = list_empty(
-                               &phba->sli4_hba.lpfc_abts_scsi_buf_list);
-
                els_xri_cmpl =
                        list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
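
The same per-hdwq scan now appears twice, once before the wait loop and once per iteration. A hypothetical condensation (lpfc_abts_lists_empty() is not the driver's code) makes the exit condition explicit: every hardware queue's abort lists must have drained.

	/* Hypothetical restatement of the duplicated scans above. */
	static bool lpfc_abts_lists_empty(struct lpfc_hba *phba)
	{
		struct lpfc_sli4_hdw_queue *qp;
		int idx;

		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			qp = &phba->sli4_hba.hdwq[idx];
			if (!list_empty(&qp->lpfc_abts_scsi_buf_list))
				return false;
			if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
			    !list_empty(&qp->lpfc_abts_nvme_buf_list))
				return false;
		}
		return true;
	}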
 
@@ -10645,7 +10633,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
        struct pci_dev *pdev = phba->pcidev;
 
        lpfc_stop_hba_timers(phba);
-       phba->sli4_hba.intr_enable = 0;
+       if (phba->pport)
+               phba->sli4_hba.intr_enable = 0;
 
        /*
         * Gracefully wait out the potential current outstanding asynchronous
@@ -10864,8 +10853,6 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                phba->nvme_support = 0;
                phba->nvmet_support = 0;
                phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
-               phba->cfg_nvme_io_channel = 0;
-               phba->io_channel_irqs = phba->cfg_fcp_io_channel;
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
                                "6101 Disabling NVME support: "
                                "Not supported by firmware: %d %d\n",
@@ -11190,6 +11177,8 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
         * corresponding pools here.
         */
        lpfc_scsi_free(phba);
+       lpfc_free_iocb_list(phba);
+
        lpfc_mem_free_all(phba);
 
        dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
@@ -11767,7 +11756,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        struct lpfc_hba   *phba;
        struct lpfc_vport *vport = NULL;
        struct Scsi_Host  *shost = NULL;
-       int error;
+       int error, len;
        uint32_t cfg_mode, intr_mode;
 
        /* Allocate memory for HBA structure */
@@ -11815,28 +11804,11 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
 
-       /* Create SCSI host to the physical port */
-       error = lpfc_create_shost(phba);
-       if (error) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "1415 Failed to create scsi host.\n");
-               goto out_unset_driver_resource;
-       }
-
-       /* Configure sysfs attributes */
-       vport = phba->pport;
-       error = lpfc_alloc_sysfs_attr(vport);
-       if (error) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "1416 Failed to allocate sysfs attr\n");
-               goto out_destroy_shost;
-       }
-
-       shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
        /* Now, trying to enable interrupt and bring up the device */
        cfg_mode = phba->cfg_use_msi;
 
        /* Put device to a known state before enabling interrupt */
+       phba->pport = NULL;
        lpfc_stop_port(phba);
 
        /* Configure and enable interrupt */
@@ -11845,18 +11817,33 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0426 Failed to enable interrupt.\n");
                error = -ENODEV;
-               goto out_free_sysfs_attr;
+               goto out_unset_driver_resource;
        }
        /* Default to single EQ for non-MSI-X */
        if (phba->intr_type != MSIX) {
-               if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
-                       phba->cfg_fcp_io_channel = 1;
+               phba->cfg_hdw_queue = 1;
                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-                       phba->cfg_nvme_io_channel = 1;
                        if (phba->nvmet_support)
                                phba->cfg_nvmet_mrq = 1;
                }
-               phba->io_channel_irqs = 1;
+       }
+
+       /* Create SCSI host to the physical port */
+       error = lpfc_create_shost(phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1415 Failed to create scsi host.\n");
+               goto out_disable_intr;
+       }
+       vport = phba->pport;
+       shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
+
+       /* Configure sysfs attributes */
+       error = lpfc_alloc_sysfs_attr(vport);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1416 Failed to allocate sysfs attr\n");
+               goto out_destroy_shost;
        }
 
        /* Set up SLI-4 HBA */
@@ -11864,7 +11851,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1421 Failed to set up hba\n");
                error = -ENODEV;
-               goto out_disable_intr;
+               goto out_free_sysfs_attr;
        }
 
        /* Log the current active interrupt mode */
@@ -11877,19 +11864,31 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        /* NVME support in FW earlier in the driver load corrects the
         * FC4 type making a check for nvme_support unnecessary.
         */
-       if ((phba->nvmet_support == 0) &&
-           (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
-               /* Create NVME binding with nvme_fc_transport. This
-                * ensures the vport is initialized.  If the localport
-                * create fails, it should not unload the driver to
-                * support field issues.
+       if (phba->nvmet_support == 0) {
+               if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+                       /* Create NVME binding with nvme_fc_transport. This
+                        * ensures the vport is initialized.  If the localport
+                        * create fails, it should not unload the driver to
+                        * support field issues.
+                        */
+                       error = lpfc_nvme_create_localport(vport);
+                       if (error) {
+                               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                               "6004 NVME registration "
+                                               "failed, error x%x\n",
+                                               error);
+                       }
+               }
+               /* Don't post more new bufs if repost already recovered
+                * the nvme sgls.
                 */
-               error = lpfc_nvme_create_localport(vport);
-               if (error) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "6004 NVME registration failed, "
-                                       "error x%x\n",
-                                       error);
+               if (phba->sli4_hba.io_xri_cnt == 0) {
+                       len = lpfc_new_io_buf(
+                               phba, phba->sli4_hba.io_xri_max);
+                       if (len == 0) {
+                               error = -ENOMEM;
+                               goto out_free_sysfs_attr;
+                       }
                }
        }
 
@@ -11905,12 +11904,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 
        return 0;
 
-out_disable_intr:
-       lpfc_sli4_disable_intr(phba);
 out_free_sysfs_attr:
        lpfc_free_sysfs_attr(vport);
 out_destroy_shost:
        lpfc_destroy_shost(phba);
+out_disable_intr:
+       lpfc_sli4_disable_intr(phba);
 out_unset_driver_resource:
        lpfc_unset_driver_resource_phase2(phba);
 out_unset_driver_resource_s4:
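
Note the error-label reordering here: disable_intr now sits below destroy_shost because interrupt setup precedes shost creation in the reordered probe flow above, keeping the unwind strictly last-in, first-out:

	/* Init order:   enable_intr -> create_shost -> alloc_sysfs_attr
	 * Unwind order: free_sysfs  -> destroy_shost -> disable_intr
	 * A failure at any step undoes only what actually completed.
	 */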
@@ -11979,7 +11978,6 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
         * the HBA FCoE function.
         */
        lpfc_debugfs_terminate(vport);
-       lpfc_sli4_hba_unset(phba);
 
        lpfc_stop_hba_timers(phba);
        spin_lock_irq(&phba->port_list_lock);
@@ -11989,9 +11987,9 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
        /* Perform scsi free before driver resource_unset since scsi
         * buffers are released to their corresponding pools here.
         */
-       lpfc_scsi_free(phba);
-       lpfc_nvme_free(phba);
+       lpfc_io_free(phba);
        lpfc_free_iocb_list(phba);
+       lpfc_sli4_hba_unset(phba);
 
        lpfc_unset_driver_resource_phase2(phba);
        lpfc_sli4_driver_resource_unset(phba);
@@ -12653,165 +12651,6 @@ lpfc_sli4_ras_init(struct lpfc_hba *phba)
        }
 }
 
-/**
- * lpfc_fof_queue_setup - Set up all the fof queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to set up all the fof queues for the FC HBA
- * operation.
- *
- * Return codes
- *      0 - successful
- *      -ENOMEM - No available memory
- **/
-int
-lpfc_fof_queue_setup(struct lpfc_hba *phba)
-{
-       struct lpfc_sli_ring *pring;
-       int rc;
-
-       rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
-       if (rc)
-               return -ENOMEM;
-
-       if (phba->cfg_fof) {
-
-               rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
-                                   phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
-               if (rc)
-                       goto out_oas_cq;
-
-               rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
-                                   phba->sli4_hba.oas_cq, LPFC_FCP);
-               if (rc)
-                       goto out_oas_wq;
-
-               /* Bind this CQ/WQ to the NVME ring */
-               pring = phba->sli4_hba.oas_wq->pring;
-               pring->sli.sli4.wqp =
-                       (void *)phba->sli4_hba.oas_wq;
-               phba->sli4_hba.oas_cq->pring = pring;
-       }
-
-       return 0;
-
-out_oas_wq:
-       lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
-out_oas_cq:
-       lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
-       return rc;
-
-}
-
-/**
- * lpfc_fof_queue_create - Create all the fof queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to allocate all the fof queues for the FC HBA
- * operation. For each SLI4 queue type, the parameters such as queue entry
- * count (queue depth) shall be taken from the module parameter. For now,
- * we just use some constant number as place holder.
- *
- * Return codes
- *      0 - successful
- *      -ENOMEM - No availble memory
- *      -EIO - The mailbox failed to complete successfully.
- **/
-int
-lpfc_fof_queue_create(struct lpfc_hba *phba)
-{
-       struct lpfc_queue *qdesc;
-       uint32_t wqesize;
-
-       /* Create FOF EQ */
-       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
-                                     phba->sli4_hba.eq_esize,
-                                     phba->sli4_hba.eq_ecount);
-       if (!qdesc)
-               goto out_error;
-
-       qdesc->qe_valid = 1;
-       phba->sli4_hba.fof_eq = qdesc;
-
-       if (phba->cfg_fof) {
-
-               /* Create OAS CQ */
-               if (phba->enab_exp_wqcq_pages)
-                       qdesc = lpfc_sli4_queue_alloc(phba,
-                                                     LPFC_EXPANDED_PAGE_SIZE,
-                                                     phba->sli4_hba.cq_esize,
-                                                     LPFC_CQE_EXP_COUNT);
-               else
-                       qdesc = lpfc_sli4_queue_alloc(phba,
-                                                     LPFC_DEFAULT_PAGE_SIZE,
-                                                     phba->sli4_hba.cq_esize,
-                                                     phba->sli4_hba.cq_ecount);
-               if (!qdesc)
-                       goto out_error;
-
-               qdesc->qe_valid = 1;
-               phba->sli4_hba.oas_cq = qdesc;
-
-               /* Create OAS WQ */
-               if (phba->enab_exp_wqcq_pages) {
-                       wqesize = (phba->fcp_embed_io) ?
-                               LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
-                       qdesc = lpfc_sli4_queue_alloc(phba,
-                                                     LPFC_EXPANDED_PAGE_SIZE,
-                                                     wqesize,
-                                                     LPFC_WQE_EXP_COUNT);
-               } else
-                       qdesc = lpfc_sli4_queue_alloc(phba,
-                                                     LPFC_DEFAULT_PAGE_SIZE,
-                                                     phba->sli4_hba.wq_esize,
-                                                     phba->sli4_hba.wq_ecount);
-
-               if (!qdesc)
-                       goto out_error;
-
-               phba->sli4_hba.oas_wq = qdesc;
-               list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
-
-       }
-       return 0;
-
-out_error:
-       lpfc_fof_queue_destroy(phba);
-       return -ENOMEM;
-}
-
-/**
- * lpfc_fof_queue_destroy - Destroy all the fof queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to release all the SLI4 queues with the FC HBA
- * operation.
- *
- * Return codes
- *      0 - successful
- **/
-int
-lpfc_fof_queue_destroy(struct lpfc_hba *phba)
-{
-       /* Release FOF Event queue */
-       if (phba->sli4_hba.fof_eq != NULL) {
-               lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
-               phba->sli4_hba.fof_eq = NULL;
-       }
-
-       /* Release OAS Completion queue */
-       if (phba->sli4_hba.oas_cq != NULL) {
-               lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
-               phba->sli4_hba.oas_cq = NULL;
-       }
-
-       /* Release OAS Work queue */
-       if (phba->sli4_hba.oas_wq != NULL) {
-               lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
-               phba->sli4_hba.oas_wq = NULL;
-       }
-       return 0;
-}
 
 MODULE_DEVICE_TABLE(pci, lpfc_id_table);