io_uring: parse check_cq out of wq waiting
author	Pavel Begunkov <asml.silence@gmail.com>
Thu, 5 Jan 2023 11:22:24 +0000 (11:22 +0000)
committer	Jens Axboe <axboe@kernel.dk>
Sun, 29 Jan 2023 22:17:39 +0000 (15:17 -0700)
We already avoid flushing overflows in io_cqring_wait_schedule() but
only return an error for the outer loop to handle. Minimise the
helper's involvement even further by moving all ->check_cq parsing out
into that outer loop.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/9dfcec3121013f98208dbf79368d636d74e1231a.1672916894.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
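
For clarity, this is how the helper reads with the patch applied, assembled
from the first hunk below. The struct io_wait_queue *iowq parameter and the
closing return are pre-existing context that the hunk truncates; they are
reconstructed here on a best-effort basis:

	static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
						  struct io_wait_queue *iowq,
						  ktime_t *timeout)
	{
		int ret;

		/* any pending ->check_cq work is now punted to the outer loop */
		if (unlikely(READ_ONCE(ctx->check_cq)))
			return 1;
		/* make sure we run task_work before checking for signals */
		ret = io_run_task_work_sig(ctx);
		if (ret || io_should_wake(iowq))
			return ret;
		if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
			return -ETIME;

		return 1;	/* reconstructed: woken, let the caller re-check */
	}
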
io_uring/io_uring.c

index 97b749203ba8ae8219551fad98e18bc3baf14c77..524ef5a2bb9c3dafe7d8c5cade299641b64b9013 100644 (file)
@@ -2471,21 +2471,13 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
                                          ktime_t *timeout)
 {
        int ret;
-       unsigned long check_cq;
 
+       if (unlikely(READ_ONCE(ctx->check_cq)))
+               return 1;
        /* make sure we run task_work before checking for signals */
        ret = io_run_task_work_sig(ctx);
        if (ret || io_should_wake(iowq))
                return ret;
-
-       check_cq = READ_ONCE(ctx->check_cq);
-       if (unlikely(check_cq)) {
-               /* let the caller flush overflows, retry */
-               if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
-                       return 1;
-               if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
-                       return -EBADR;
-       }
        if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
                return -ETIME;
 
@@ -2551,13 +2543,25 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
        trace_io_uring_cqring_wait(ctx, min_events);
        do {
-               if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
-                       finish_wait(&ctx->cq_wait, &iowq.wq);
-                       io_cqring_do_overflow_flush(ctx);
-               }
+               unsigned long check_cq;
+
                prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
                                                TASK_INTERRUPTIBLE);
                ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
+
+               check_cq = READ_ONCE(ctx->check_cq);
+               if (unlikely(check_cq)) {
+                       /* let the caller flush overflows, retry */
+                       if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) {
+                               finish_wait(&ctx->cq_wait, &iowq.wq);
+                               io_cqring_do_overflow_flush(ctx);
+                       }
+                       if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
+                               ret = -EBADR;
+                               break;
+                       }
+               }
+
                if (__io_cqring_events_user(ctx) >= min_events)
                        break;
                cond_resched();
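
Assembled from the hunk above, the reshaped wait loop reads as follows. The
closing while condition is pre-existing context the hunk cuts off; it is
reconstructed here assuming the loop retries while the helper returns a
positive value:

	trace_io_uring_cqring_wait(ctx, min_events);
	do {
		unsigned long check_cq;

		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
						TASK_INTERRUPTIBLE);
		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);

		/* all ->check_cq parsing now happens here, after waking */
		check_cq = READ_ONCE(ctx->check_cq);
		if (unlikely(check_cq)) {
			/* let the caller flush overflows, retry */
			if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) {
				finish_wait(&ctx->cq_wait, &iowq.wq);
				io_cqring_do_overflow_flush(ctx);
			}
			if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
				ret = -EBADR;
				break;
			}
		}

		if (__io_cqring_events_user(ctx) >= min_events)
			break;
		cond_resched();
	} while (ret > 0);	/* reconstructed loop condition */

Net effect: whenever any ->check_cq bit is set, io_cqring_wait_schedule()
returns 1 before sleeping, and both the overflow flush and the -EBADR
(dropped CQE) case are handled in one place after the wait, rather than
being split between the helper and the outer loop.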