nvme: Add async passthru polling support (nvme-passthru-wip.2)
author Pankaj Raghav <p.raghav@samsung.com>
Mon, 20 Dec 2021 14:17:34 +0000 (19:47 +0530)
committer Jens Axboe <axboe@kernel.dk>
Tue, 21 Dec 2021 16:38:25 +0000 (09:38 -0700)
io_uring already has polling support for reads and writes. This patch
extends that support to uring cmd passthru. A previously unused flag in
the uring_cmd struct is used to indicate whether the completion should
be polled. If device-side polling is not enabled, the submission falls
back to a non-polled request.
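
Below is a minimal, userspace-compilable sketch of the completion-path
decision this patch adds in drivers/nvme/host/ioctl.c: a passthru
command is completed inline (and later reaped via ->iopoll) only when
the ring requested polling (URING_CMD_POLLED) and the request actually
sits on a poll queue; otherwise it falls back to task-work completion.
The types below are simplified stand-ins for illustration, not the
kernel structures.

  #include <stdbool.h>
  #include <stdio.h>

  #define URING_CMD_POLLED (1 << 0)

  /* simplified stand-ins, not the kernel types */
  struct io_uring_cmd { unsigned int flags; };
  struct request { bool on_poll_queue; };

  /* stand-in for blk_rq_is_poll(): true only for requests on poll queues */
  static bool blk_rq_is_poll(const struct request *req)
  {
          return req->on_poll_queue;
  }

  static bool is_polling_enabled(const struct io_uring_cmd *ioucmd,
                                 const struct request *req)
  {
          return (ioucmd->flags & URING_CMD_POLLED) && blk_rq_is_poll(req);
  }

  int main(void)
  {
          struct io_uring_cmd cmd = { .flags = URING_CMD_POLLED };
          struct request irq_rq = { .on_poll_queue = false };
          struct request poll_rq = { .on_poll_queue = true };

          /* no poll queues configured: fall back to task-work completion */
          printf("IRQ queue:  %s\n", is_polling_enabled(&cmd, &irq_rq) ?
                 "complete inline" : "task-work completion");
          /* poll queue available: complete inline, io_uring reaps via iopoll */
          printf("poll queue: %s\n", is_polling_enabled(&cmd, &poll_rq) ?
                 "complete inline" : "task-work completion");
          return 0;
  }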

Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
Link: https://lore.kernel.org/r/20211220141734.12206-14-joshi.k@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c
drivers/nvme/host/core.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
fs/io_uring.c
include/linux/blk-mq.h
include/linux/io_uring.h

index c77991688bfd22fd9ae3a91c4e9db98beea6afba..acfa55c96a4350523c9105496e3f1c8728f220a2 100644 (file)
@@ -1193,7 +1193,7 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
-static bool blk_rq_is_poll(struct request *rq)
+bool blk_rq_is_poll(struct request *rq)
 {
        if (!rq->mq_hctx)
                return false;
@@ -1203,6 +1203,7 @@ static bool blk_rq_is_poll(struct request *rq)
                return false;
        return true;
 }
+EXPORT_SYMBOL_GPL(blk_rq_is_poll);
 
 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
index 5199adf7ae925b730f52a187a27301a1506dc99b..f0697cbe2bf1f0356f5c6451ca41bce82c285b96 100644 (file)
@@ -3676,6 +3676,7 @@ static const struct file_operations nvme_ns_chr_fops = {
        .unlocked_ioctl = nvme_ns_chr_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
        .async_cmd      = nvme_ns_chr_async_cmd,
+       .iopoll         = nvme_iopoll,
 };
 
 static int nvme_add_ns_cdev(struct nvme_ns *ns)
index bdaf8f317aa83c7385a599cc284f3a00c3f37938..ce2fe94df3ad712ca642d7166d994cde0a910ca5 100644 (file)
@@ -31,6 +31,12 @@ struct nvme_uring_cmd {
        void __user *meta_buffer;
 };
 
+static inline bool is_polling_enabled(struct io_uring_cmd *ioucmd,
+                                     struct request *req)
+{
+       return (ioucmd->flags & URING_CMD_POLLED) && blk_rq_is_poll(req);
+}
+
 static struct nvme_uring_cmd *nvme_uring_cmd(struct io_uring_cmd *ioucmd)
 {
        return (struct nvme_uring_cmd *)&ioucmd->pdu;
@@ -76,8 +82,16 @@ static void nvme_end_async_pt(struct request *req, blk_status_t err)
 
        cmd->req = req;
        req->bio = bio;
-       /* this takes care of setting up task-work */
-       io_uring_cmd_complete_in_task(ioucmd, nvme_pt_task_cb);
+
+       /*
+        * IO can be completed immediately when the callback
+        * is in the same task context.
+        */
+       if (is_polling_enabled(ioucmd, req)) {
+               nvme_pt_task_cb(ioucmd);
+       } else {
+               /* this takes care of setting up task-work */
+               io_uring_cmd_complete_in_task(ioucmd, nvme_pt_task_cb);
+       }
 }
 
 static void nvme_setup_uring_cmd_data(struct request *rq,
@@ -183,6 +197,12 @@ static int nvme_submit_user_cmd(struct request_queue *q,
                }
        }
        if (ioucmd) { /* async dispatch */
+
+               if (bio && is_polling_enabled(ioucmd, req)) {
+                       ioucmd->bio = bio;
+                       bio->bi_opf |= REQ_POLLED;
+               }
+
                nvme_setup_uring_cmd_data(req, ioucmd, meta, meta_buffer,
                                meta_len, write);
                blk_execute_rq_nowait(req, 0, nvme_end_async_pt);
@@ -496,6 +516,32 @@ int nvme_ns_chr_async_cmd(struct io_uring_cmd *ioucmd,
        return nvme_ns_async_ioctl(ns, ioucmd);
 }
 
+int nvme_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+               unsigned int flags)
+{
+       struct bio *bio = NULL;
+       struct nvme_ns *ns = NULL;
+       struct request_queue *q = NULL;
+       int ret = 0;
+
+       rcu_read_lock();
+       bio = READ_ONCE(kiocb->private);
+       ns = container_of(file_inode(kiocb->ki_filp)->i_cdev, struct nvme_ns,
+                         cdev);
+       q = ns->queue;
+
+       /*
+        * bio and driver_cb are part of the same union in struct
+        * io_uring_cmd. When there are no poll queues, driver_cb is used
+        * for the IRQ callback, but polling is still attempted from the
+        * io_uring side. To avoid needless polling, check that the queue
+        * is a polled one and return 0 if it is not.
+        */
+       if ((test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) && bio && bio->bi_bdev)
+               ret = bio_poll(bio, iob, flags);
+       rcu_read_unlock();
+       return ret;
+}
+
 #ifdef CONFIG_NVME_MULTIPATH
 static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
                void __user *argp, struct nvme_ns_head *head, int srcu_idx)
@@ -577,6 +623,35 @@ out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
 }
+
+int nvme_ns_head_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+                       unsigned int flags)
+{
+       struct bio *bio = NULL;
+       struct request_queue *q = NULL;
+       struct cdev *cdev = file_inode(kiocb->ki_filp)->i_cdev;
+       struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
+       int srcu_idx = srcu_read_lock(&head->srcu);
+       struct nvme_ns *ns = nvme_find_path(head);
+       int ret = -EWOULDBLOCK;
+
+       if (ns) {
+               bio = READ_ONCE(kiocb->private);
+               q = ns->queue;
+               /*
+                * bio and driver_cb are part of the same union in struct
+                * io_uring_cmd. When there are no poll queues, driver_cb is
+                * used for the IRQ callback, but polling is still attempted
+                * from the io_uring side. To avoid needless polling, only
+                * call bio_poll() when the queue is a polled one.
+                */
+               if ((test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) && bio &&
+                   bio->bi_bdev)
+                       ret = bio_poll(bio, iob, flags);
+       }
+
+       srcu_read_unlock(&head->srcu, srcu_idx);
+       return ret;
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
index 1e59c8e0662230b7ea3b466935bcc8318e2006ce..df91b2953932614998b1dfa6f6b9e9b342c74293 100644 (file)
@@ -424,6 +424,7 @@ static const struct file_operations nvme_ns_head_chr_fops = {
        .unlocked_ioctl = nvme_ns_head_chr_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
        .async_cmd      = nvme_ns_head_chr_async_cmd,
+       .iopoll         = nvme_ns_head_iopoll,
 };
 
 static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
index 56a7cc8421fc033479d30eaf3a32b56de954e336..730ada8a3e8eef14f8b6615b4cdbfaa8bd4b8787 100644 (file)
@@ -752,8 +752,12 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg);
 int nvme_ns_chr_async_cmd(struct io_uring_cmd *ucmd,
                enum io_uring_cmd_flags flags);
+int nvme_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+               unsigned int flags);
 int nvme_ns_head_chr_async_cmd(struct io_uring_cmd *ucmd,
                enum io_uring_cmd_flags flags);
+int nvme_ns_head_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+                       unsigned int flags);
 int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 
 extern const struct attribute_group *nvme_ns_id_attr_groups[];
index f77dde1bdc75cf507039a4d186a32036012d34be..ae2e7666622ec4b4d4dfc6fc2eb629f63bf720c4 100644 (file)
@@ -2655,7 +2655,20 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
                if (READ_ONCE(req->iopoll_completed))
                        break;
 
-               ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags);
+               if (req->opcode == IORING_OP_URING_CMD ||
+                   req->opcode == IORING_OP_URING_CMD_FIXED) {
+                       /* uring_cmd structure does not contain kiocb struct */
+                       struct kiocb kiocb_uring_cmd;
+
+                       kiocb_uring_cmd.private = req->uring_cmd.bio;
+                       kiocb_uring_cmd.ki_filp = req->uring_cmd.file;
+                       ret = req->uring_cmd.file->f_op->iopoll(&kiocb_uring_cmd,
+                             &iob, poll_flags);
+               } else {
+                       ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob,
+                                                          poll_flags);
+               }
+
                if (unlikely(ret < 0))
                        return ret;
                else if (ret)
@@ -2768,6 +2781,15 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
                            wq_list_empty(&ctx->iopoll_list))
                                break;
                }
+
+               /*
+                * In some scenarios the completion callback has been queued
+                * up as task work, but polling runs in that same task, so the
+                * callback never gets a chance to execute. Run any pending
+                * task work before polling.
+                */
+               if (current->task_works)
+                       io_run_task_work();
+
                ret = io_do_iopoll(ctx, !min);
                if (ret < 0)
                        break;
@@ -4122,6 +4144,14 @@ static int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
        return 0;
 }
 
+static void io_complete_uring_cmd_iopoll(struct io_kiocb *req, long res)
+{
+       WRITE_ONCE(req->result, res);
+       /* order with io_iopoll_complete() checking ->result */
+       smp_wmb();
+       WRITE_ONCE(req->iopoll_completed, 1);
+}
+
 /*
  * Called by consumers of io_uring_cmd, if they originally returned
  * -EIOCBQUEUED upon receiving the command.
@@ -4132,7 +4162,11 @@ void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret)
 
        if (ret < 0)
                req_set_fail(req);
-       io_req_complete(req, ret);
+
+       if (req->uring_cmd.flags & URING_CMD_POLLED)
+               io_complete_uring_cmd_iopoll(req, ret);
+       else
+               io_req_complete(req, ret);
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 
@@ -4147,8 +4181,11 @@ static int io_uring_cmd_prep(struct io_kiocb *req,
                return -EOPNOTSUPP;
 
        if (req->ctx->flags & IORING_SETUP_IOPOLL) {
-               printk_once(KERN_WARNING "io_uring: iopoll not supported!\n");
-               return -EOPNOTSUPP;
+               req->uring_cmd.flags = URING_CMD_POLLED;
+               req->uring_cmd.bio = NULL;
+               req->iopoll_completed = 0;
+       } else {
+               req->uring_cmd.flags = 0;
        }
 
        cmd->op = READ_ONCE(csqe->op);
index e35a5d835b1faa86257b3649ed35106261ccf68c..2233ccf41c1983edeafa28be9c3842829cec2c93 100644 (file)
@@ -933,6 +933,7 @@ int blk_rq_map_kern(struct request_queue *, struct request *, void *,
 int blk_rq_append_bio(struct request *rq, struct bio *bio);
 void blk_execute_rq_nowait(struct request *rq, bool at_head,
                rq_end_io_fn *end_io);
+bool blk_rq_is_poll(struct request *rq);
 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
 
 struct req_iterator {
index 07732bc850af70fe754db28a8bd451c05536e0ca..bbc9c4ea19c39423da3bc69b649884a419133ad1 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/xarray.h>
 
 enum {
+       URING_CMD_POLLED = (1 << 0),
        URING_CMD_FIXEDBUFS = (1 << 1),
 };
 /*
@@ -17,8 +18,13 @@ struct io_uring_cmd {
        __u16           op;
        __u16           flags;
        __u32           len;
-       /* used if driver requires update in task context*/
-       void (*driver_cb)(struct io_uring_cmd *cmd);
+       union {
+               /* used for polling-based completion */
+               void *bio;
+
+               /* used if driver requires update in task context (IRQ completion) */
+               void (*driver_cb)(struct io_uring_cmd *cmd);
+       };
+
        __u64           pdu[5]; /* 40 bytes available inline for free use */
 };