author     Pavel Begunkov <asml.silence@gmail.com>  2022-06-19 12:26:08 +0100
committer  Jens Axboe <axboe@kernel.dk>  2022-06-22 11:32:32 -0600
commit     abb9030d1f03c310238a99bf42a9677217858fd0 (patch)
tree       0e86c8e2f8b645aff6ac890b62410bcd53468d31
parent     3d4a210d8d4ad954bc72d58a848e5d33f642efd8 (diff)
io_uring: remove ->flush_cqes optimisation
It's not clear how widely IOSQE_CQE_SKIP_SUCCESS is used, nor how often the ->flush_cqes flag actually saves flushing a completion. Sometimes a high level of concurrency enables it for at least one CQE in a batch, but sometimes it doesn't save much because nobody is waiting on the CQ. Remove the ->flush_cqes flag and the optimisation; this should benefit the normal use case.

Note that this does not reintroduce the spurious eventfd problem, as the checks for spuriousness were incorporated into io_eventfd_signal().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/692e81eeddccc096f449a7960365fa7b4a18f8e6.1655637157.git.asml.silence@gmail.com
[axboe: remove now-dead state->flush_cqes variable]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
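To illustrate the flag in question from the userspace side, here is a minimal liburing sketch (my own illustration, not part of this patch; it assumes liburing 2.2+ and a 5.17+ kernel, where IOSQE_CQE_SKIP_SUCCESS is available):

/* Submit a nop that posts no CQE on success. Batches containing such
 * requests are what the kernel-side ->flush_cqes bookkeeping, removed
 * by this commit, used to track. Build with -luring. */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return 1;
	io_uring_prep_nop(sqe);
	/* Skip the CQE if (and only if) the request succeeds. */
	sqe->flags |= IOSQE_CQE_SKIP_SUCCESS;

	if (io_uring_submit(&ring) < 0)
		fprintf(stderr, "submit failed\n");
	/* A successful nop leaves the CQ empty; only a failure
	 * would post a CQE here. */
	io_uring_queue_exit(&ring);
	return 0;
}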
-rw-r--r--  include/linux/io_uring_types.h   1
-rw-r--r--  io_uring/io_uring.c             23
-rw-r--r--  io_uring/io_uring.h              2
3 files changed, 10 insertions, 16 deletions
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 2015f3ea7cb7..6bcd7bff6479 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -148,7 +148,6 @@ struct io_submit_state {
bool plug_started;
bool need_plug;
- bool flush_cqes;
unsigned short submit_nr;
struct blk_plug plug;
};
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index c5530fb8cee7..0f18a86f3f8c 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1250,22 +1250,19 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
struct io_wq_work_node *node, *prev;
struct io_submit_state *state = &ctx->submit_state;
- if (state->flush_cqes) {
- spin_lock(&ctx->completion_lock);
- wq_list_for_each(node, prev, &state->compl_reqs) {
- struct io_kiocb *req = container_of(node, struct io_kiocb,
- comp_list);
-
- if (!(req->flags & REQ_F_CQE_SKIP))
- __io_fill_cqe_req(ctx, req);
- }
+ spin_lock(&ctx->completion_lock);
+ wq_list_for_each(node, prev, &state->compl_reqs) {
+ struct io_kiocb *req = container_of(node, struct io_kiocb,
+ comp_list);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
- state->flush_cqes = false;
+ if (!(req->flags & REQ_F_CQE_SKIP))
+ __io_fill_cqe_req(ctx, req);
}
+ io_commit_cqring(ctx);
+ spin_unlock(&ctx->completion_lock);
+ io_cqring_ev_posted(ctx);
+
io_free_batch_list(ctx, state->compl_reqs.first);
INIT_WQ_LIST(&state->compl_reqs);
}
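For reference, the post-patch version of the function, reconstructed from the hunk above (code outside the hunk elided, comments mine):

static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
{
	struct io_wq_work_node *node, *prev;
	struct io_submit_state *state = &ctx->submit_state;

	/* Lock and walk the batch unconditionally; the per-batch
	 * ->flush_cqes gate is gone, only per-request skips remain. */
	spin_lock(&ctx->completion_lock);
	wq_list_for_each(node, prev, &state->compl_reqs) {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    comp_list);

		if (!(req->flags & REQ_F_CQE_SKIP))
			__io_fill_cqe_req(ctx, req);
	}
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);

	io_free_batch_list(ctx, state->compl_reqs.first);
	INIT_WQ_LIST(&state->compl_reqs);
}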
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 7b2055b342df..bdc62727638b 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -219,8 +219,6 @@ static inline void io_req_add_compl_list(struct io_kiocb *req)
{
struct io_submit_state *state = &req->ctx->submit_state;
- if (!(req->flags & REQ_F_CQE_SKIP))
- state->flush_cqes = true;
wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}
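Likewise, the helper ends up as a plain list append (reconstructed from the hunk above, comment mine):

static inline void io_req_add_compl_list(struct io_kiocb *req)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	/* No ->flush_cqes tracking to update anymore. */
	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}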