io_uring: small optimisation of tctx_task_work
author: Pavel Begunkov <asml.silence@gmail.com>
Mon, 21 Mar 2022 22:02:19 +0000 (22:02 +0000)
committer: Jens Axboe <axboe@kernel.dk>
Sun, 24 Apr 2022 23:34:16 +0000 (17:34 -0600)
There should be no completions stashed when we first get into
tctx_task_work(), so move the completion-flushing check a bit later,
after we have had a chance to execute some task work items.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/c6765c804f3c438591b9825ab9c43d22039073c4.1647897811.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
fs/io_uring.c

index 7625b29153b923ad757bd41265876f937b6e7a38..7fefdb9c4ae37a047d6e6cdfdfe3875f271c9457 100644 (file)
@@ -2475,10 +2475,6 @@ static void tctx_task_work(struct callback_head *cb)
        while (1) {
                struct io_wq_work_node *node1, *node2;
 
-               if (!tctx->task_list.first &&
-                   !tctx->prior_task_list.first && uring_locked)
-                       io_submit_flush_completions(ctx);
-
                spin_lock_irq(&tctx->task_lock);
                node1 = tctx->prior_task_list.first;
                node2 = tctx->task_list.first;
@@ -2492,10 +2488,13 @@ static void tctx_task_work(struct callback_head *cb)
 
                if (node1)
                        handle_prev_tw_list(node1, &ctx, &uring_locked);
-
                if (node2)
                        handle_tw_list(node2, &ctx, &uring_locked);
                cond_resched();
+
+               if (!tctx->task_list.first &&
+                   !tctx->prior_task_list.first && uring_locked)
+                       io_submit_flush_completions(ctx);
        }
 
        ctx_flush_and_put(ctx, &uring_locked);