author     Pankaj Raghav <p.raghav@samsung.com>      2021-12-20 19:47:34 +0530
committer  Jens Axboe <axboe@kernel.dk>              2021-12-21 09:38:25 -0700
commit     7567defdb3c6a81e4114a03ddf3d6c158df81a76 (patch)
tree       58db2804294104872c606b4f29a7e44543370b3c /fs/io_uring.c
parent     8d684aa889d335d791e6c7adeb070345bfbba26a (diff)
nvme: Add async passthru polling support (nvme-passthru-wip.2)
io_uring already has polling support for read and write. This patch
extends that support to uring cmd passthru. The unused flag in the
uring_cmd struct is used to indicate whether the completion should be
polled. If device-side polling is not enabled, the submitted request
falls back to a non-polled request.
Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
Link: https://lore.kernel.org/r/20211220141734.12206-14-joshi.k@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
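
The driver-side half of this flow lives in the other patches of the series, not in
this fs/io_uring.c diff. As a rough illustration of the contract described above,
the sketch below shows what a ->uring_cmd provider might do: stash its bio for the
polled case, return -EIOCBQUEUED, and later complete through io_uring_cmd_done().
Only io_uring_cmd_done(), the URING_CMD_POLLED flag and the bio/flags fields of
struct io_uring_cmd come from this series; every example_* name, the issue-flags
type and the request plumbing are hypothetical.

    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>
    #include <linux/io_uring.h>

    /* Hypothetical completion handler, wired up as the request's end_io callback. */
    static void example_uring_cmd_end_io(struct request *rq, blk_status_t status)
    {
    	struct io_uring_cmd *cmd = rq->end_io_data;	/* assumed stashing */

    	/*
    	 * Report completion to io_uring. On an IOPOLL ring this now marks
    	 * ->iopoll_completed instead of posting the CQE directly.
    	 */
    	io_uring_cmd_done(cmd, blk_status_to_errno(status));
    	blk_mq_free_request(rq);
    }

    /* Hypothetical ->uring_cmd handler (e.g. the nvme passthru path). */
    static int example_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
    {
    	struct request *rq;

    	/* hypothetical helper: allocate and map the passthru request (not shown) */
    	rq = example_alloc_passthru_request(cmd);
    	if (IS_ERR(rq))
    		return PTR_ERR(rq);

    	if (cmd->flags & URING_CMD_POLLED) {
    		/*
    		 * Expose the bio so io_do_iopoll() can hand it to ->iopoll().
    		 * If the underlying queue is not set up for polled I/O, the
    		 * request simply completes via the normal interrupt path.
    		 */
    		cmd->bio = rq->bio;
    	}

    	rq->end_io_data = cmd;
    	/* ... submit rq asynchronously with example_uring_cmd_end_io ... (not shown) */
    	return -EIOCBQUEUED;	/* completion arrives later via end_io */
    }

The key point from the commit message is the fallback: nothing here forces
polling. If device-side polling is not enabled, the stashed bio still completes
through interrupts and io_uring_cmd_done() runs from that path instead.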
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--   fs/io_uring.c   45
1 file changed, 41 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f77dde1bdc75..ae2e7666622e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2655,7 +2655,20 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		if (READ_ONCE(req->iopoll_completed))
 			break;
 
-		ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags);
+		if (req->opcode == IORING_OP_URING_CMD ||
+		    req->opcode == IORING_OP_URING_CMD_FIXED) {
+			/* uring_cmd structure does not contain kiocb struct */
+			struct kiocb kiocb_uring_cmd;
+
+			kiocb_uring_cmd.private = req->uring_cmd.bio;
+			kiocb_uring_cmd.ki_filp = req->uring_cmd.file;
+			ret = req->uring_cmd.file->f_op->iopoll(&kiocb_uring_cmd,
+								&iob, poll_flags);
+		} else {
+			ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob,
+							   poll_flags);
+		}
+
 		if (unlikely(ret < 0))
 			return ret;
 		else if (ret)
@@ -2768,6 +2781,15 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 			    wq_list_empty(&ctx->iopoll_list))
 				break;
 		}
+
+		/*
+		 * In some scenarios, completion callback has been queued up to be
+		 * completed in-task context but polling happens in the same task
+		 * not giving a chance for the completion callback to complete.
+		 */
+		if (current->task_works)
+			io_run_task_work();
+
 		ret = io_do_iopoll(ctx, !min);
 		if (ret < 0)
 			break;
@@ -4122,6 +4144,14 @@ static int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
 	return 0;
 }
 
+static void io_complete_uring_cmd_iopoll(struct io_kiocb *req, long res)
+{
+	WRITE_ONCE(req->result, res);
+	/* order with io_iopoll_complete() checking ->result */
+	smp_wmb();
+	WRITE_ONCE(req->iopoll_completed, 1);
+}
+
 /*
  * Called by consumers of io_uring_cmd, if they originally returned
  * -EIOCBQUEUED upon receiving the command.
@@ -4132,7 +4162,11 @@ void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret)
 
 	if (ret < 0)
 		req_set_fail(req);
-	io_req_complete(req, ret);
+
+	if (req->uring_cmd.flags & URING_CMD_POLLED)
+		io_complete_uring_cmd_iopoll(req, ret);
+	else
+		io_req_complete(req, ret);
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 
@@ -4147,8 +4181,11 @@ static int io_uring_cmd_prep(struct io_kiocb *req,
 		return -EOPNOTSUPP;
 
 	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
-		printk_once(KERN_WARNING "io_uring: iopoll not supported!\n");
-		return -EOPNOTSUPP;
+		req->uring_cmd.flags = URING_CMD_POLLED;
+		req->uring_cmd.bio = NULL;
+		req->iopoll_completed = 0;
+	} else {
+		req->uring_cmd.flags = 0;
 	}
 
 	cmd->op = READ_ONCE(csqe->op);
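
For context on the temporary kiocb fabricated in io_do_iopoll() above: ->iopoll()
implementations of this kernel generation take a kiocb and read the bio to poll
from kiocb->private, which is why the patch copies req->uring_cmd.bio into it. A
minimal sketch of such a provider is below; the function name is hypothetical,
and the real char-device ->iopoll for nvme passthru is added in a separate patch
of the series.

    #include <linux/fs.h>
    #include <linux/blkdev.h>

    /*
     * Illustrative ->iopoll() provider: polls the bio that the submission path
     * stashed for this command (exposed to io_uring as uring_cmd.bio and copied
     * into the temporary kiocb's ->private by io_do_iopoll()).
     */
    static int example_chrdev_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
    				 unsigned int flags)
    {
    	struct bio *bio = kiocb->private;

    	if (!bio)
    		return 0;	/* nothing in flight to poll yet */

    	/* bio_poll() drives the device's poll queue; >0 means work was reaped */
    	return bio_poll(bio, iob, flags);
    }

A positive return tells io_do_iopoll() that completions were found; the
io_run_task_work() call added to io_iopoll_check() then gives any completion
that was deferred to task work a chance to run before the next poll pass.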