io_uring/rw: avoid punting to io-wq directly
author Pavel Begunkov <asml.silence@gmail.com>
Mon, 18 Mar 2024 22:00:28 +0000 (22:00 +0000)
committer Jens Axboe <axboe@kernel.dk>
Mon, 15 Apr 2024 14:10:24 +0000 (08:10 -0600)
kiocb_done() shouldn't have to care about specifically redirecting
requests to io-wq. Remove the hop to task_work whose only job is to
then queue the request to io-wq; instead return -EAGAIN and let the
core io_uring code handle the offloading.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/413564e550fe23744a970e1783dfa566291b0e6f.1710799188.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/rw.c

index d5ac91b5b171700774cbe99d58477ed9f292b81d..e9da2ad67c2e821412e84e9ff394a5721ce673ab 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -492,7 +492,7 @@ static void io_prep_async_link(struct io_kiocb *req)
        }
 }
 
-void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use)
+static void io_queue_iowq(struct io_kiocb *req)
 {
        struct io_kiocb *link = io_prep_linked_timeout(req);
        struct io_uring_task *tctx = req->task->io_uring;
@@ -1499,7 +1499,7 @@ void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
        if (unlikely(req->task->flags & PF_EXITING))
                io_req_defer_failed(req, -EFAULT);
        else if (req->flags & REQ_F_FORCE_ASYNC)
-               io_queue_iowq(req, ts);
+               io_queue_iowq(req);
        else
                io_queue_sqe(req);
 }
@@ -2087,7 +2087,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
                break;
        case IO_APOLL_ABORTED:
                io_kbuf_recycle(req, 0);
-               io_queue_iowq(req, NULL);
+               io_queue_iowq(req);
                break;
        case IO_APOLL_OK:
                break;
@@ -2134,7 +2134,7 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
                if (unlikely(req->ctx->drain_active))
                        io_drain_req(req);
                else
-                       io_queue_iowq(req, NULL);
+                       io_queue_iowq(req);
        }
 }
 
index 935d8d0747dc31b7f5c3025c2dc6d4c9c5a3c0a2..0861d49e83de031035a7ffd9dc46a93fb7001139 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -79,7 +79,6 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
 bool io_alloc_async_data(struct io_kiocb *req);
 void io_req_task_queue(struct io_kiocb *req);
-void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
 void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
 void io_req_task_queue_fail(struct io_kiocb *req, int ret);
 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
index c8d48287439e5a06d19113ecb07f1c05db47dc3f..0afe4f9e0e3f7a5e7fa82ea2e764dfd1afea32f6 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -187,12 +187,6 @@ static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
        return NULL;
 }
 
-static void io_req_task_queue_reissue(struct io_kiocb *req)
-{
-       req->io_task_work.func = io_queue_iowq;
-       io_req_task_work_add(req);
-}
-
 #ifdef CONFIG_BLOCK
 static bool io_resubmit_prep(struct io_kiocb *req)
 {
@@ -405,7 +399,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
        if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req))
-                       io_req_task_queue_reissue(req);
+                       return -EAGAIN;
                else
                        io_req_task_queue_fail(req, final_ret);
        }
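
For context, the -EAGAIN that kiocb_done() now returns surfaces through
the issue path in io_uring/io_uring.c, where io_queue_async() (second
hunk above) decides between arming a poll handler and punting to io-wq.
Below is a minimal sketch of that path, paraphrased from the tree at
this point rather than quoted verbatim; io_issue_sqe() and the issue
flags are existing upstream names not shown in this diff, and
linked-timeout and error-handling details are trimmed:

/*
 * Sketch only: condensed from io_uring/io_uring.c, not a verbatim copy.
 */
static inline void io_queue_sqe(struct io_kiocb *req)
{
	int ret;

	/* kiocb_done()'s -EAGAIN bubbles up through io_issue_sqe() */
	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK | IO_URING_F_COMPLETE_DEFER);

	/* any failure, including -EAGAIN, goes to the core fallback */
	if (unlikely(ret))
		io_queue_async(req, ret);
}

Since the rw layer no longer schedules task_work whose only job was to
call io_queue_iowq(), the helper loses its last external caller and its
unused io_tw_state argument, which is why the first hunk can make it
static.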