io_uring/rw: ensure retry condition isn't lost
author Jens Axboe <axboe@kernel.dk>
Wed, 17 Apr 2024 15:23:55 +0000 (09:23 -0600)
committer Jens Axboe <axboe@kernel.dk>
Wed, 17 Apr 2024 15:23:55 +0000 (09:23 -0600)
A previous commit removed the check for whether or not it was possible
to retry a request, since any request can now be retried. Previously that
case would have ended the request with an error, but now the retry
condition can simply get lost instead.

Clean up the retry handling and always just punt it to task_work, which
will queue it with io-wq appropriately.
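As a rough illustration of the pattern (a minimal userspace sketch, not
kernel code; struct req, tw_queue_iowq, run_task_work and queue_iowq are
invented names, not the io_uring API), the completion path now arms a
task_work callback that clears the reissue flag and requeues the request,
so the retry is guaranteed to run rather than relying on a flag that a
later path may never inspect:

	#include <stdio.h>

	#define F_REISSUE	(1u << 0)

	struct req {
		unsigned int flags;
		void (*tw_func)(struct req *);
	};

	static struct req *tw_pending;		/* stand-in for the task_work list */

	static void queue_iowq(struct req *r)	/* stand-in for io_queue_iowq() */
	{
		printf("retrying request via io-wq\n");
	}

	static void tw_requeue_iowq(struct req *r)
	{
		r->flags &= ~F_REISSUE;
		queue_iowq(r);
	}

	/* completion path: arm task_work instead of only setting a flag */
	static void tw_queue_iowq(struct req *r)
	{
		r->flags |= F_REISSUE;
		r->tw_func = tw_requeue_iowq;
		tw_pending = r;			/* io_req_task_work_add() equivalent */
	}

	static void run_task_work(void)
	{
		if (tw_pending) {
			tw_pending->tw_func(tw_pending);
			tw_pending = NULL;
		}
	}

	int main(void)
	{
		struct req r = { .flags = 0 };

		tw_queue_iowq(&r);	/* -EAGAIN seen at completion time */
		run_task_work();	/* retry is picked up here, not lost */
		return 0;
	}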

Reported-by: Changhui Zhong <czhong@redhat.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Fixes: cca6571381a0 ("io_uring/rw: cleanup retry path")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/rw.c

index 3c9087f37c43527ce39cadabd671c80778b3b2e5..c67ae6e36c4f3e25f792d9fb3e933f92c4d06072 100644
@@ -527,6 +527,19 @@ static void io_queue_iowq(struct io_kiocb *req)
                io_queue_linked_timeout(link);
 }
 
+static void io_tw_requeue_iowq(struct io_kiocb *req, struct io_tw_state *ts)
+{
+       req->flags &= ~REQ_F_REISSUE;
+       io_queue_iowq(req);
+}
+
+void io_tw_queue_iowq(struct io_kiocb *req)
+{
+       req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
+       req->io_task_work.func = io_tw_requeue_iowq;
+       io_req_task_work_add(req);
+}
+
 static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
 {
        while (!list_empty(&ctx->defer_list)) {
index 624ca9076a50beba7c60c73d9beb394a9326d709..b83a719c54436be32b500d29d45ee8f61cbb34fb 100644
@@ -75,6 +75,7 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
 bool io_alloc_async_data(struct io_kiocb *req);
 void io_req_task_queue(struct io_kiocb *req);
+void io_tw_queue_iowq(struct io_kiocb *req);
 void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
 void io_req_task_queue_fail(struct io_kiocb *req, int ret);
 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
index 3134a6ece1be552001b3e1f8aa080a9c8170688e..4fed829fe97cc4ae51e414a9587f5d140a4aa73b 100644
@@ -455,7 +455,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
                         * current cycle.
                         */
                        io_req_io_end(req);
-                       req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
+                       io_tw_queue_iowq(req);
                        return true;
                }
                req_set_fail(req);
@@ -521,7 +521,7 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
                io_req_end_write(req);
        if (unlikely(res != req->cqe.res)) {
                if (res == -EAGAIN && io_rw_should_reissue(req)) {
-                       req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
+                       io_tw_queue_iowq(req);
                        return;
                }
                req->cqe.res = res;
@@ -839,7 +839,8 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
        ret = io_iter_do_read(rw, &io->iter);
 
        if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
-               req->flags &= ~REQ_F_REISSUE;
+               if (req->flags & REQ_F_REISSUE)
+                       return IOU_ISSUE_SKIP_COMPLETE;
                /* If we can poll, just do that. */
                if (io_file_can_poll(req))
                        return -EAGAIN;
@@ -1034,10 +1035,8 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
        else
                ret2 = -EINVAL;
 
-       if (req->flags & REQ_F_REISSUE) {
-               req->flags &= ~REQ_F_REISSUE;
-               ret2 = -EAGAIN;
-       }
+       if (req->flags & REQ_F_REISSUE)
+               return IOU_ISSUE_SKIP_COMPLETE;
 
        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just