io_uring: clean iowq submit work cancellation
author     Pavel Begunkov <asml.silence@gmail.com>
           Sat, 23 Oct 2021 11:13:57 +0000 (12:13 +0100)
committer  Jens Axboe <axboe@kernel.dk>
           Mon, 25 Oct 2021 13:42:29 +0000 (07:42 -0600)
If we've got IO_WQ_WORK_CANCEL in io_wq_submit_work(), handle the error
right where the flag is checked instead of having a weird code flow. The
main loop doesn't change but moves one indentation level to the left.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/ff4a09cf41f7a22bbb294b6f1faea721e21fe615.1634987320.git.asml.silence@gmail.com
Reviewed-by: Hao Xu <haoxu@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
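
The resulting flow can be illustrated with a minimal, self-contained
userspace sketch (purely illustrative: struct work, submit_work() and the
helpers below are made-up stand-ins, not the io_uring functions in the
diff). Failing and returning at the cancellation check keeps the error
handling next to the condition that produces it, and the retry loop no
longer needs to sit under an "if (!ret)" block:

	/* illustrative sketch only, not kernel code */
	#include <errno.h>
	#include <stdio.h>

	#define WORK_CANCEL	(1u << 0)

	struct work {
		unsigned int flags;
		int attempts_left;	/* pretend resource: how often submission EAGAINs */
	};

	static void fail_work(struct work *w, int err)
	{
		printf("work failed with %d\n", err);
	}

	static int issue_once(struct work *w)
	{
		/* simulate a submission that returns -EAGAIN until a slot frees up */
		if (w->attempts_left-- > 0)
			return -EAGAIN;
		return 0;
	}

	static void submit_work(struct work *w)
	{
		int ret;

		/* handle cancellation right where it is detected and return early */
		if (w->flags & WORK_CANCEL) {
			fail_work(w, -ECANCELED);
			return;
		}

		/* the main retry loop now sits one indentation level to the left */
		do {
			ret = issue_once(w);
			if (ret != -EAGAIN)
				break;
			/* back off and retry the blocking submission */
		} while (1);

		if (ret)
			fail_work(w, ret);
		else
			printf("work submitted\n");
	}

	int main(void)
	{
		struct work cancelled = { .flags = WORK_CANCEL };
		struct work normal = { .flags = 0, .attempts_left = 2 };

		submit_work(&cancelled);	/* prints the -ECANCELED failure */
		submit_work(&normal);		/* retries twice, then succeeds */
		return 0;
	}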
fs/io_uring.c

index 7f92523c1282d19339cc2e6098cbe1a4e5e5bcb0..58cb3a14d58e81a6fbdc201a9ea45c758b3a8559 100644
@@ -6721,6 +6721,8 @@ static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
 static void io_wq_submit_work(struct io_wq_work *work)
 {
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+       unsigned int issue_flags = IO_URING_F_UNLOCKED;
+       bool needs_poll = false;
        struct io_kiocb *timeout;
        int ret = 0;
 
@@ -6735,40 +6737,37 @@ static void io_wq_submit_work(struct io_wq_work *work)
                io_queue_linked_timeout(timeout);
 
        /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
-       if (work->flags & IO_WQ_WORK_CANCEL)
-               ret = -ECANCELED;
+       if (work->flags & IO_WQ_WORK_CANCEL) {
+               io_req_task_queue_fail(req, -ECANCELED);
+               return;
+       }
 
-       if (!ret) {
-               bool needs_poll = false;
-               unsigned int issue_flags = IO_URING_F_UNLOCKED;
+       if (req->flags & REQ_F_FORCE_ASYNC) {
+               needs_poll = req->file && file_can_poll(req->file);
+               if (needs_poll)
+                       issue_flags |= IO_URING_F_NONBLOCK;
+       }
 
-               if (req->flags & REQ_F_FORCE_ASYNC) {
-                       needs_poll = req->file && file_can_poll(req->file);
-                       if (needs_poll)
-                               issue_flags |= IO_URING_F_NONBLOCK;
+       do {
+               ret = io_issue_sqe(req, issue_flags);
+               if (ret != -EAGAIN)
+                       break;
+               /*
+                * We can get EAGAIN for iopolled IO even though we're
+                * forcing a sync submission from here, since we can't
+                * wait for request slots on the block side.
+                */
+               if (!needs_poll) {
+                       cond_resched();
+                       continue;
                }
 
-               do {
-                       ret = io_issue_sqe(req, issue_flags);
-                       if (ret != -EAGAIN)
-                               break;
-                       /*
-                        * We can get EAGAIN for iopolled IO even though we're
-                        * forcing a sync submission from here, since we can't
-                        * wait for request slots on the block side.
-                        */
-                       if (!needs_poll) {
-                               cond_resched();
-                               continue;
-                       }
-
-                       if (io_arm_poll_handler(req) == IO_APOLL_OK)
-                               return;
-                       /* aborted or ready, in either case retry blocking */
-                       needs_poll = false;
-                       issue_flags &= ~IO_URING_F_NONBLOCK;
-               } while (1);
-       }
+               if (io_arm_poll_handler(req) == IO_APOLL_OK)
+                       return;
+               /* aborted or ready, in either case retry blocking */
+               needs_poll = false;
+               issue_flags &= ~IO_URING_F_NONBLOCK;
+       } while (1);
 
        /* avoid locking problems by failing it from a clean context */
        if (ret)