io_uring: pass in EPOLL_URING_WAKE for eventfd signaling and wakeups (tag: 5.10-stable-backport.3)
author Jens Axboe <axboe@kernel.dk>
Fri, 23 Dec 2022 14:04:49 +0000 (07:04 -0700)
committer Jens Axboe <axboe@kernel.dk>
Fri, 23 Dec 2022 21:51:46 +0000 (14:51 -0700)
[ Upstream commit 4464853277d0ccdb9914608dd1332f0fa2f9846f ]

Pass in EPOLL_URING_WAKE when signaling eventfd or doing poll related
wakeups, so that we can check for a circular event dependency between
eventfd and epoll. If this flag is set when our wakeup handlers are
called, then we know we have a dependency that needs to terminate
multishot requests.

eventfd and epoll are the only such possible dependencies.

Cc: stable@vger.kernel.org # 6.0
Signed-off-by: Jens Axboe <axboe@kernel.dk>
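
For context, the circular dependency being guarded against can be built entirely
from userspace. Below is a hypothetical, simplified liburing sketch (not part of
this patch; error handling omitted): the ring signals its registered eventfd
whenever a CQE is posted, that eventfd sits in an epoll set, and the epoll fd is
in turn watched with a multishot poll on the same ring, so every completion
re-arms itself.

	/*
	 * Hypothetical userspace sketch of the eventfd <-> epoll loop this
	 * patch detects; not part of the kernel change.
	 */
	#include <poll.h>
	#include <sys/epoll.h>
	#include <sys/eventfd.h>
	#include <liburing.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct epoll_event ev = { .events = EPOLLIN };
		int efd, epfd;

		io_uring_queue_init(8, &ring, 0);

		/* the ring signals this eventfd whenever a CQE is posted */
		efd = eventfd(0, EFD_CLOEXEC);
		io_uring_register_eventfd(&ring, efd);

		/* the eventfd is watched by an epoll instance ... */
		epfd = epoll_create1(EPOLL_CLOEXEC);
		ev.data.fd = efd;
		epoll_ctl(epfd, EPOLL_CTL_ADD, efd, &ev);

		/*
		 * ... and the epoll fd is watched by a multishot poll on the
		 * same ring: completing the poll posts a CQE, which signals
		 * the eventfd, which wakes epoll, which re-triggers the poll.
		 * With this patch, the poll wakeup sees EPOLL_URING_WAKE and
		 * is downgraded to oneshot, terminating the loop.
		 */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_poll_multishot(sqe, epfd, POLLIN);
		io_uring_submit(&ring);

		io_uring_queue_exit(&ring);
		return 0;
	}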
io_uring/io_uring.c

index 473dbd1830a3b62cf7f97eb24024397fe4f55819..945faf036ad0f8d102948fc27d6835e597783c48 100644
@@ -1626,13 +1626,15 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
         * wake as many waiters as we need to.
         */
        if (wq_has_sleeper(&ctx->cq_wait))
-               wake_up_all(&ctx->cq_wait);
+               __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
+                               poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
        if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
                wake_up(&ctx->sq_data->wait);
        if (io_should_trigger_evfd(ctx))
-               eventfd_signal(ctx->cq_ev_fd, 1);
+               eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
        if (waitqueue_active(&ctx->poll_wait))
-               wake_up_interruptible(&ctx->poll_wait);
+               __wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
+                               poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
 }
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1642,12 +1644,14 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 
        if (ctx->flags & IORING_SETUP_SQPOLL) {
                if (waitqueue_active(&ctx->cq_wait))
-                       wake_up_all(&ctx->cq_wait);
+                       __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
+                                 poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
        }
        if (io_should_trigger_evfd(ctx))
-               eventfd_signal(ctx->cq_ev_fd, 1);
+               eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
        if (waitqueue_active(&ctx->poll_wait))
-               wake_up_interruptible(&ctx->poll_wait);
+               __wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
+                               poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
 }
 
 /* Returns true if there are no backlogged entries after the flush */
@@ -5477,8 +5481,17 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        if (mask && !(mask & poll->events))
                return 0;
 
-       if (io_poll_get_ownership(req))
+       if (io_poll_get_ownership(req)) {
+               /*
+                * If we trigger a multishot poll off our own wakeup path,
+                * disable multishot as there is a circular dependency between
+                * CQ posting and triggering the event.
+                */
+               if (mask & EPOLL_URING_WAKE)
+                       poll->events |= EPOLLONESHOT;
+
                __io_poll_execute(req, mask);
+       }
        return 1;
 }
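
The flag travels in the wait-queue key: the posting side encodes it with
poll_to_key(), and any poll-style wait callback on that queue can recover it
with key_to_poll(), which is exactly what io_poll_wake() does above. A minimal
sketch of the pattern follows (the two function names are illustrative;
__wake_up(), poll_to_key(), key_to_poll() and EPOLL_URING_WAKE are the real
kernel interfaces):

	/* Minimal sketch of how EPOLL_URING_WAKE rides in the wakeup key. */
	#include <linux/sched.h>
	#include <linux/wait.h>
	#include <linux/poll.h>

	/* waker side: tag the wakeup as coming from io_uring CQE posting */
	static void post_cqe_wake(struct wait_queue_head *wq)
	{
		if (wq_has_sleeper(wq))
			__wake_up(wq, TASK_NORMAL, 0,
				  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
	}

	/* wakee side: a wait_queue_func_t callback sees the same bits in @key */
	static int example_poll_wake(struct wait_queue_entry *wait, unsigned mode,
				     int sync, void *key)
	{
		__poll_t mask = key_to_poll(key);

		if (mask & EPOLL_URING_WAKE) {
			/*
			 * Woken by io_uring itself; break the recursion, e.g.
			 * by downgrading a multishot request to oneshot as the
			 * hunk above does.
			 */
		}
		return 1;
	}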