io_uring: remove ->flush_cqes optimisation
author Pavel Begunkov <asml.silence@gmail.com>
Sun, 19 Jun 2022 11:26:08 +0000 (12:26 +0100)
committer Jens Axboe <axboe@kernel.dk>
Mon, 25 Jul 2022 00:39:14 +0000 (18:39 -0600)
It's not clear how widely used IOSQE_CQE_SKIP_SUCCESS is, and how often
the ->flush_cqes flag actually prevents completions from being flushed.
Sometimes it's a high level of concurrency that enables it for at least
one CQE, but sometimes it doesn't save much because nobody is waiting on
the CQ.

Remove the ->flush_cqes flag and the optimisation; it should benefit the
normal use case. Note that there is no spurious eventfd problem with
this, as the checks for spuriousness were incorporated into
io_eventfd_signal().
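
For reference, a consolidated view of the resulting
__io_submit_flush_completions(), reconstructed from the io_uring.c hunk
below (declarations and annotations outside the hunk are elided):

static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
{
        struct io_wq_work_node *node, *prev;
        struct io_submit_state *state = &ctx->submit_state;

        /*
         * Always take the completion lock and walk the batch; only
         * requests without REQ_F_CQE_SKIP get a CQE posted.
         */
        spin_lock(&ctx->completion_lock);
        wq_list_for_each(node, prev, &state->compl_reqs) {
                struct io_kiocb *req = container_of(node, struct io_kiocb,
                                            comp_list);

                if (!(req->flags & REQ_F_CQE_SKIP))
                        __io_fill_cqe_req(ctx, req);
        }

        io_commit_cqring(ctx);
        spin_unlock(&ctx->completion_lock);
        io_cqring_ev_posted(ctx);

        io_free_batch_list(ctx, state->compl_reqs.first);
        INIT_WQ_LIST(&state->compl_reqs);
}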

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/692e81eeddccc096f449a7960365fa7b4a18f8e6.1655637157.git.asml.silence@gmail.com
[axboe: remove now dead state->flush_cqes variable]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
include/linux/io_uring_types.h
io_uring/io_uring.c
io_uring/io_uring.h

index 2015f3ea7cb76b2823dcec7a2a826845ce1024e3..6bcd7bff6479bebd8869eb83a9ebd7fa03fa80b6 100644 (file)
@@ -148,7 +148,6 @@ struct io_submit_state {
 
        bool                    plug_started;
        bool                    need_plug;
-       bool                    flush_cqes;
        unsigned short          submit_nr;
        struct blk_plug         plug;
 };
index 61d4e6d0731a20452780fdfbd9d205aa0da5651b..16a625e854ec33e1904727ae4728df1f09cf053c 100644 (file)
@@ -1250,22 +1250,19 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
        struct io_wq_work_node *node, *prev;
        struct io_submit_state *state = &ctx->submit_state;
 
-       if (state->flush_cqes) {
-               spin_lock(&ctx->completion_lock);
-               wq_list_for_each(node, prev, &state->compl_reqs) {
-                       struct io_kiocb *req = container_of(node, struct io_kiocb,
-                                                   comp_list);
-
-                       if (!(req->flags & REQ_F_CQE_SKIP))
-                               __io_fill_cqe_req(ctx, req);
-               }
+       spin_lock(&ctx->completion_lock);
+       wq_list_for_each(node, prev, &state->compl_reqs) {
+               struct io_kiocb *req = container_of(node, struct io_kiocb,
+                                           comp_list);
 
-               io_commit_cqring(ctx);
-               spin_unlock(&ctx->completion_lock);
-               io_cqring_ev_posted(ctx);
-               state->flush_cqes = false;
+               if (!(req->flags & REQ_F_CQE_SKIP))
+                       __io_fill_cqe_req(ctx, req);
        }
 
+       io_commit_cqring(ctx);
+       spin_unlock(&ctx->completion_lock);
+       io_cqring_ev_posted(ctx);
+
        io_free_batch_list(ctx, state->compl_reqs.first);
        INIT_WQ_LIST(&state->compl_reqs);
 }
index 7b2055b342dfab89facb10775d85101627c0dde7..bdc62727638be8156bb5986b4f9c1198f9b458ac 100644 (file)
@@ -219,8 +219,6 @@ static inline void io_req_add_compl_list(struct io_kiocb *req)
 {
        struct io_submit_state *state = &req->ctx->submit_state;
 
-       if (!(req->flags & REQ_F_CQE_SKIP))
-               state->flush_cqes = true;
        wq_list_add_tail(&req->comp_list, &state->compl_reqs);
 }