nvme/io_uring: use helper for polled completions
author		Jens Axboe <axboe@kernel.dk>
		Tue, 19 Mar 2024 23:10:50 +0000 (17:10 -0600)
committer	Jens Axboe <axboe@kernel.dk>
		Mon, 15 Apr 2024 14:10:24 +0000 (08:10 -0600)
NVMe is making up issue_flags, which is a no-no in general, and to make
matters worse, they are completely the wrong ones. For a pure polled
request, which it does check for, we're already inside the
ctx->uring_lock when the completions are run off io_do_iopoll(). Hence
the correct flag would be '0' rather than IO_URING_F_UNLOCKED.

Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
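
For context, a sketch (not part of the patch) of what the two completion
calls boil down to, assuming, per the 6.9-era source, that
nvme_uring_task_cb() ends in io_uring_cmd_done() with the caller-supplied
issue_flags:

	/* Before: the polled branch claimed to run without ctx->uring_lock */
	nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
	/* ...ending in io_uring_cmd_done(..., IO_URING_F_UNLOCKED) */

	/* After: the helper pins the truthful "locked" value, 0 */
	io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
	/* ...which is io_uring_cmd_done(ioucmd, ret, res2, 0), per the
	 * second hunk below */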
drivers/nvme/host/ioctl.c
include/linux/io_uring/cmd.h

diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 3dfd5ae99ae05e892eb793cb3b21ba0b75dd6e98..499a8bb7cac7d13e618021f9c6b95d94d974f0bf 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -423,13 +423,20 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
        pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
 
        /*
-        * For iopoll, complete it directly.
+        * For iopoll, complete it directly. Note that using the uring_cmd
+        * helper for this is safe only because we check blk_rq_is_poll().
+        * As that returns false if we're NOT on a polled queue, then it's
+        * safe to use the polled completion helper.
+        *
         * Otherwise, move the completion to task work.
         */
-       if (blk_rq_is_poll(req))
-               nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
-       else
+       if (blk_rq_is_poll(req)) {
+               if (pdu->bio)
+                       blk_rq_unmap_user(pdu->bio);
+               io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
+       } else {
                io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
+       }
 
        return RQ_END_IO_FREE;
 }
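
The polled branch above is only taken when user space issued the passthrough
command on an IOPOLL ring against a device with poll queues. A minimal
user-space sketch of that setup follows; the device path /dev/ng0n1, a
512-byte LBA size, and poll queues being enabled (e.g. nvme.poll_queues=2)
are assumptions, and error handling is trimmed. Build with: gcc -luring

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <liburing.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct nvme_uring_cmd *cmd;
	void *buf;
	int fd, nsid;

	/*
	 * IOPOLL selects the blk_rq_is_poll() == true completion path;
	 * NVMe passthrough needs 128-byte SQEs and 32-byte CQEs.
	 */
	if (io_uring_queue_init(4, &ring, IORING_SETUP_IOPOLL |
				IORING_SETUP_SQE128 | IORING_SETUP_CQE32))
		return 1;

	fd = open("/dev/ng0n1", O_RDONLY);
	if (fd < 0)
		return 1;
	nsid = ioctl(fd, NVME_IOCTL_ID);
	if (nsid < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, 2 * sizeof(*sqe));	/* SQE128: clear both halves */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = NVME_URING_CMD_IO;

	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	cmd->opcode = 0x02;			/* NVMe Read */
	cmd->nsid = nsid;
	cmd->addr = (__u64)(uintptr_t)buf;
	cmd->data_len = 4096;
	cmd->cdw10 = 0;				/* starting LBA, low 32 bits */
	cmd->cdw12 = 4096 / 512 - 1;		/* zero-based LBA count */

	io_uring_submit(&ring);
	/*
	 * For an IOPOLL ring this spins in io_do_iopoll() under
	 * ctx->uring_lock; the completion arrives via the end_io handler
	 * above, now through io_uring_cmd_iopoll_done().
	 */
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}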
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index 01b95505538af7b7e2b9751819fa3745476940c9..447fbfd322154a71f1ea3558edd30dadff33cc78 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -69,6 +69,17 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
 }
 #endif
 
+/*
+ * Polled completions must ensure they are coming from a poll queue, and
+ * hence are completed inside the usual poll handling loops.
+ */
+static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
+                                           ssize_t ret, ssize_t res2)
+{
+       lockdep_assert(in_task());
+       io_uring_cmd_done(ioucmd, ret, res2, 0);
+}
+
 /* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
 static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
                        void (*task_work_cb)(struct io_uring_cmd *, unsigned))
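
The helper is not nvme-specific: any ->uring_cmd() provider that completes
polled requests from a request end_io handler can follow the same pattern as
the nvme hunk above. A hedged sketch with invented foo_* names (the pdu
layout is driver-private and must fit io_uring_cmd's 32-byte pdu area):

#include <linux/blk-mq.h>
#include <linux/io_uring/cmd.h>

struct foo_uring_cmd_pdu {
	int status;
	u64 result;
};	/* must fit in io_uring_cmd::pdu */

/* Task-work path: io_uring supplies truthful issue_flags itself. */
static void foo_uring_task_cb(struct io_uring_cmd *ioucmd,
			      unsigned issue_flags)
{
	struct foo_uring_cmd_pdu *pdu = (void *)ioucmd->pdu;

	/* ret lands in cqe->res; res2 needs a CQE32 ring */
	io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
}

static enum rq_end_io_ret foo_uring_cmd_end_io(struct request *req,
					       blk_status_t err)
{
	struct io_uring_cmd *ioucmd = req->end_io_data;
	struct foo_uring_cmd_pdu *pdu = (void *)ioucmd->pdu;

	if (blk_rq_is_poll(req)) {
		/*
		 * Reaped from io_do_iopoll() in the submitter's task with
		 * ctx->uring_lock held; the helper asserts in_task() and
		 * completes with issue_flags of 0 rather than made-up ones.
		 */
		io_uring_cmd_iopoll_done(ioucmd, pdu->status, pdu->result);
	} else {
		io_uring_cmd_do_in_task_lazy(ioucmd, foo_uring_task_cb);
	}
	return RQ_END_IO_FREE;
}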