io_uring: fix stopping iopoll'ing too early
author    Pavel Begunkov <asml.silence@gmail.com>
          Mon, 6 Jul 2020 14:59:30 +0000 (17:59 +0300)
committer Jens Axboe <axboe@kernel.dk>
          Mon, 6 Jul 2020 15:06:20 +0000 (09:06 -0600)
Nobody adjusts *nr_events (the number of completed requests) before calling
io_iopoll_getevents(), so the passed @min shouldn't be adjusted either.
Otherwise it can return fewer events than initially asked for by @min
without hitting need_resched().
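
To illustrate the arithmetic, here is a minimal userspace sketch (not
kernel code; do_poll_pass() and getevents_old() are hypothetical
stand-ins for io_do_iopoll() and the pre-fix io_iopoll_getevents())
showing how comparing the accumulated total against a caller-computed
remainder stops polling early:

	#include <stdio.h>

	/* Stand-in for io_do_iopoll(): pretend each poll pass
	 * reaps exactly one completion. */
	static void do_poll_pass(unsigned int *nr_events)
	{
		(*nr_events)++;
	}

	/* Pre-fix logic: @min here is the remainder the old caller
	 * computed (tmin), but *nr_events is the running total. */
	static int getevents_old(unsigned int *nr_events, unsigned int min)
	{
		do_poll_pass(nr_events);
		/* total vs. remainder: exits before @min completions */
		if (!min || *nr_events >= min)
			return 0;
		return 1;
	}

	int main(void)
	{
		unsigned int nr_events = 2;	/* already reaped earlier */
		unsigned int min = 4;		/* user asked for 4 total */
		unsigned int tmin = min - nr_events;	/* old caller: 2 */

		getevents_old(&nr_events, tmin);
		/* nr_events is now 3, yet the call reported "done"
		 * because 3 >= tmin (2), even though 3 < min (4). */
		printf("reaped %u of %u requested\n", nr_events, min);
		return 0;
	}

Passing the unadjusted @min, as the fix does, makes the check compare
the total against the total, so the loop keeps polling until @min
completions are reaped or need_resched() fires.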

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
fs/io_uring.c

index 60f1a81c6c354f67ea9117eeab49430cd817e51e..332008f346e31a12cf65de3d0ee2e4a532474129 100644 (file)
@@ -2044,7 +2044,7 @@ static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
                ret = io_do_iopoll(ctx, nr_events, min);
                if (ret < 0)
                        return ret;
-               if (!min || *nr_events >= min)
+               if (*nr_events >= min)
                        return 0;
        }
 
@@ -2087,8 +2087,6 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
         */
        mutex_lock(&ctx->uring_lock);
        do {
-               int tmin = 0;
-
                /*
                 * Don't enter poll loop if we already have events pending.
                 * If we do, we can potentially be spinning for commands that
@@ -2113,10 +2111,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
                        mutex_lock(&ctx->uring_lock);
                }
 
-               if (*nr_events < min)
-                       tmin = min - *nr_events;
-
-               ret = io_iopoll_getevents(ctx, nr_events, tmin);
+               ret = io_iopoll_getevents(ctx, nr_events, min);
                if (ret <= 0)
                        break;
                ret = 0;