nvme: introduce nvme_start_request
authorSagi Grimberg <sagi@grimberg.me>
Mon, 3 Oct 2022 09:43:43 +0000 (12:43 +0300)
committerChristoph Hellwig <hch@lst.de>
Tue, 6 Dec 2022 08:16:57 +0000 (09:16 +0100)
In preparation for nvme-multipath IO stats accounting, we want the
accounting to happen in a centralized place. The request completion
is already centralized, but we need a common helper for
request I/O start as well.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
drivers/nvme/host/apple.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/loop.c

index cab69516af5b83bab825c131f142c8897e8c7a9c..94ef797e8b4a5f3a69eb1b7de1cb560ee652a6d0 100644 (file)
@@ -763,7 +763,7 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                        goto out_free_cmd;
        }
 
-       blk_mq_start_request(req);
+       nvme_start_request(req);
        apple_nvme_submit_cmd(q, cmnd);
        return BLK_STS_OK;
 
index aa5fb56c07d990754631f030e307d801cbd574f4..489f5e79720469f483d6a189361af0f5cc18ba52 100644 (file)
@@ -2733,7 +2733,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
        atomic_set(&op->state, FCPOP_STATE_ACTIVE);
 
        if (!(op->flags & FCOP_FLAGS_AEN))
-               blk_mq_start_request(op->rq);
+               nvme_start_request(op->rq);
 
        cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
        ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
index b3a1c595d144b80bc3f154a3d9549082708a654e..8522d6dc93e8a63c899020c93ceff2d94a3814ce 100644 (file)
@@ -1012,6 +1012,11 @@ static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
 }
 #endif
 
+static inline void nvme_start_request(struct request *rq)
+{
+       blk_mq_start_request(rq);
+}
+
 static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
 {
        return ctrl->sgls & ((1 << 0) | (1 << 1));
index e0da4a6719a7525768dee0d5bb0b5b61988f37e6..ac734c8f6640e5b5a84c0fbd89bdc0438bc83cc6 100644 (file)
@@ -907,7 +907,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
                        goto out_unmap_data;
        }
 
-       blk_mq_start_request(req);
+       nvme_start_request(req);
        return BLK_STS_OK;
 out_unmap_data:
        nvme_unmap_data(dev, req);
index de591cdf78f3b4b7038342eb2b6e1737e8b9b509..448abf8cdf1f6e8b5a0b82554208b70cb3134e85 100644 (file)
@@ -2040,7 +2040,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (ret)
                goto unmap_qe;
 
-       blk_mq_start_request(rq);
+       nvme_start_request(rq);
 
        if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
            queue->pi_support &&
index 776b8d9dfca79645f6dc3eb63bf232c013b49c7d..79789daddeac522f05241ed61e48e1b815fb16bf 100644 (file)
@@ -2405,7 +2405,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (unlikely(ret))
                return ret;
 
-       blk_mq_start_request(rq);
+       nvme_start_request(rq);
 
        nvme_tcp_queue_request(req, true, bd->last);
 
index 4173099ef9a45551b8c5ab0a6d0ff5b78eff5f40..6d176621f46dc6974523d21d6244d3044f1ef38a 100644 (file)
@@ -145,7 +145,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (ret)
                return ret;
 
-       blk_mq_start_request(req);
+       nvme_start_request(req);
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
        iod->req.port = queue->ctrl->port;
        if (!nvmet_req_init(&iod->req, &queue->nvme_cq,