io_uring: keep timeout in io_wait_queue
author: Pavel Begunkov <asml.silence@gmail.com>
Thu, 5 Jan 2023 11:22:29 +0000 (11:22 +0000)
committer: Jens Axboe <axboe@kernel.dk>
Sun, 29 Jan 2023 22:17:40 +0000 (15:17 -0700)
Move waiting timeout into io_wait_queue

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e4b48a9e26a3b1cf97c80121e62d4b5ab873d28d.1672916894.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/io_uring.c

index 6229a49c0c33a1051ad5459c7440d969c167cc84..fdea6fbc3fadde94fd6bd429b05527ce99effe50 100644 (file)
@@ -2414,6 +2414,7 @@ struct io_wait_queue {
        struct io_ring_ctx *ctx;
        unsigned cq_tail;
        unsigned nr_timeouts;
+       ktime_t timeout;
 };
 
 static inline bool io_has_work(struct io_ring_ctx *ctx)
@@ -2466,8 +2467,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
 
 /* when returns >0, the caller should retry */
 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
-                                         struct io_wait_queue *iowq,
-                                         ktime_t *timeout)
+                                         struct io_wait_queue *iowq)
 {
        if (unlikely(READ_ONCE(ctx->check_cq)))
                return 1;
@@ -2479,9 +2479,9 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
                return -EINTR;
        if (unlikely(io_should_wake(iowq)))
                return 0;
-       if (*timeout == KTIME_MAX)
+       if (iowq->timeout == KTIME_MAX)
                schedule();
-       else if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
+       else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
                return -ETIME;
        return 0;
 }
@@ -2496,7 +2496,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 {
        struct io_wait_queue iowq;
        struct io_rings *rings = ctx->rings;
-       ktime_t timeout = KTIME_MAX;
        int ret;
 
        if (!io_allowed_run_tw(ctx))
@@ -2522,20 +2521,21 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                        return ret;
        }
 
-       if (uts) {
-               struct timespec64 ts;
-
-               if (get_timespec64(&ts, uts))
-                       return -EFAULT;
-               timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
-       }
-
        init_waitqueue_func_entry(&iowq.wq, io_wake_function);
        iowq.wq.private = current;
        INIT_LIST_HEAD(&iowq.wq.entry);
        iowq.ctx = ctx;
        iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
        iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
+       iowq.timeout = KTIME_MAX;
+
+       if (uts) {
+               struct timespec64 ts;
+
+               if (get_timespec64(&ts, uts))
+                       return -EFAULT;
+               iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+       }
 
        trace_io_uring_cqring_wait(ctx, min_events);
        do {
@@ -2543,7 +2543,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
                prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
                                                TASK_INTERRUPTIBLE);
-               ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
+               ret = io_cqring_wait_schedule(ctx, &iowq);
                if (ret < 0)
                        break;
                __set_current_state(TASK_RUNNING);