nvmet/rdma: Use sgl_alloc() and sgl_free()
author Bart Van Assche <bart.vanassche@wdc.com>
Fri, 5 Jan 2018 16:26:49 +0000 (08:26 -0800)
committer Jens Axboe <axboe@kernel.dk>
Sat, 6 Jan 2018 16:18:00 +0000 (09:18 -0700)
Use the sgl_alloc() and sgl_free() library functions instead of
open-coding scatterlist allocation and freeing.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
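
For reference, the helpers this patch switches to live in lib/scatterlist.c
and are guarded by CONFIG_SGL_ALLOC, which is why the Kconfig hunk below
selects that symbol. Their interface, declared in <linux/scatterlist.h>, is
roughly the following sketch; the comments are paraphrased rather than
quoted from the kernel source:

	#include <linux/scatterlist.h>

	/*
	 * Allocate a scatterlist of order-0 pages covering @length bytes.
	 * On success, the number of entries is stored in *nent_p and the
	 * table is terminated with an end marker; returns NULL on failure.
	 */
	struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
				      unsigned int *nent_p);

	/* Free the pages and the scatterlist allocated by sgl_alloc(). */
	void sgl_free(struct scatterlist *sgl);
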
drivers/nvme/target/Kconfig
drivers/nvme/target/rdma.c

diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 4d9715630e210fad5bd5aff83c346c17f041f83f..5f4f8b16685f4ff9225b5f040e67c1a48dd94c14 100644
@@ -29,6 +29,7 @@ config NVME_TARGET_RDMA
        tristate "NVMe over Fabrics RDMA target support"
        depends on INFINIBAND
        depends on NVME_TARGET
+       select SGL_ALLOC
        help
          This enables the NVMe RDMA target support, which allows exporting NVMe
          devices over RDMA.
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 49912909c2986a8d22b01c1aedb411d425fd4d20..0e4c15754c58fe4d0932ce3f3e3be56f0ec6f91b 100644
@@ -185,59 +185,6 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
        spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
 }
 
-static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
-{
-       struct scatterlist *sg;
-       int count;
-
-       if (!sgl || !nents)
-               return;
-
-       for_each_sg(sgl, sg, nents, count)
-               __free_page(sg_page(sg));
-       kfree(sgl);
-}
-
-static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
-               u32 length)
-{
-       struct scatterlist *sg;
-       struct page *page;
-       unsigned int nent;
-       int i = 0;
-
-       nent = DIV_ROUND_UP(length, PAGE_SIZE);
-       sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
-       if (!sg)
-               goto out;
-
-       sg_init_table(sg, nent);
-
-       while (length) {
-               u32 page_len = min_t(u32, length, PAGE_SIZE);
-
-               page = alloc_page(GFP_KERNEL);
-               if (!page)
-                       goto out_free_pages;
-
-               sg_set_page(&sg[i], page, page_len, 0);
-               length -= page_len;
-               i++;
-       }
-       *sgl = sg;
-       *nents = nent;
-       return 0;
-
-out_free_pages:
-       while (i > 0) {
-               i--;
-               __free_page(sg_page(&sg[i]));
-       }
-       kfree(sg);
-out:
-       return NVME_SC_INTERNAL;
-}
-
 static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
                        struct nvmet_rdma_cmd *c, bool admin)
 {
@@ -484,7 +431,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
        }
 
        if (rsp->req.sg != &rsp->cmd->inline_sg)
-               nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);
+               sgl_free(rsp->req.sg);
 
        if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
                nvmet_rdma_process_wr_wait_list(queue);
@@ -621,16 +568,14 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
        u32 len = get_unaligned_le24(sgl->length);
        u32 key = get_unaligned_le32(sgl->key);
        int ret;
-       u16 status;
 
        /* no data command? */
        if (!len)
                return 0;
 
-       status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
-                       len);
-       if (status)
-               return status;
+       rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
+       if (!rsp->req.sg)
+               return NVME_SC_INTERNAL;
 
        ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
                        rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
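
The hunk above is the allocation side of the conversion: sgl_alloc() replaces
both nvmet_rdma_alloc_sgl() and the local status variable, with allocation
failure still mapped to NVME_SC_INTERNAL. On the release side, sgl_free()
needs no entry count because the helper walks the table itself, stopping at
the end marker set by sgl_alloc(); the inline_sg check is kept because the
inline scatterlist is not allocated by sgl_alloc() and must not be freed
through it. Condensed from the hunks above, the resulting pattern is roughly:

	rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
	if (!rsp->req.sg)
		return NVME_SC_INTERNAL;
	/* ... RDMA transfer via rdma_rw_ctx ... */
	if (rsp->req.sg != &rsp->cmd->inline_sg)
		sgl_free(rsp->req.sg);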