nvmet: use inline bio for passthru fast path
authorChaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Tue, 10 Nov 2020 02:24:05 +0000 (18:24 -0800)
committerChristoph Hellwig <hch@lst.de>
Tue, 1 Dec 2020 19:36:36 +0000 (20:36 +0100)
In nvmet_passthru_execute_cmd(), which is a high-frequency function,
bio_alloc() is used, leading to a memory allocation from the fs pool
for each I/O.

For an NVMeoF nvmet_req we already have an inline_bvec allocated as
part of the request allocation. It can back a preallocated bio whenever
the size of the request is known before bio allocation — which it is in
this path.

Introduce a bio member in the nvmet_req passthru anonymous union. In
the fast path, check whether we can get away with the inline bvec and
the bio embedded in nvmet_req, set up via bio_init(), before falling
back to allocating with bio_alloc().

This avoids any new memory allocation under high memory pressure and
removes the extra cost of allocation (bio_alloc()) versus
initialization (bio_init()) when the transfer length is <=
NVMET_MAX_INLINE_DATA_LEN, which the user can configure at compile
time.

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/target/nvmet.h
drivers/nvme/target/passthru.c

index 2f9635273629287fb7686b7fe942b3dba5e1bb2b..e89ec280e91a1c4b8a254d64e1859dbffde90d10 100644 (file)
@@ -332,6 +332,7 @@ struct nvmet_req {
                        struct work_struct      work;
                } f;
                struct {
+                       struct bio              inline_bio;
                        struct request          *rq;
                        struct work_struct      work;
                        bool                    use_workqueue;
index 2b24205ee79d8e1c0c919b38163535fbe325166f..b9776fc8f08f4277f00b6befbd5500d24da656ef 100644 (file)
@@ -194,14 +194,20 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
        if (req->sg_cnt > BIO_MAX_PAGES)
                return -EINVAL;
 
-       bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
-       bio->bi_end_io = bio_put;
+       if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+               bio = &req->p.inline_bio;
+               bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+       } else {
+               bio = bio_alloc(GFP_KERNEL, min(req->sg_cnt, BIO_MAX_PAGES));
+               bio->bi_end_io = bio_put;
+       }
        bio->bi_opf = req_op(rq);
 
        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
                                    sg->offset) < sg->length) {
-                       bio_put(bio);
+                       if (bio != &req->p.inline_bio)
+                               bio_put(bio);
                        return -EINVAL;
                }
        }