summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPavel Begunkov <asml.silence@gmail.com>2022-06-15 17:33:55 +0100
committerJens Axboe <axboe@kernel.dk>2022-06-22 11:30:59 -0600
commit9dcb6b610a18f669f5895c737f32966c779f8c20 (patch)
treebaaa9d296cae52425ab6f006008331f4e22f9b27
parent9bbc74fd432c5a11fbc145d56b602901efa7e7db (diff)
io_uring: remove check_cq checking from hot paths
All ctx->check_cq events are slow path, don't test every single flag one by one in the hot path, but add a common guarding if. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/dff026585cea7ff3a172a7c83894a3b0111bbf6a.1655310733.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--io_uring/io_uring.c34
1 file changed, 19 insertions, 15 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 03408309c8c0..0c64e0db9042 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1259,24 +1259,25 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
int ret = 0;
unsigned long check_cq;
+ check_cq = READ_ONCE(ctx->check_cq);
+ if (unlikely(check_cq)) {
+ if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
+ __io_cqring_overflow_flush(ctx, false);
+ /*
+ * Similarly do not spin if we have not informed the user of any
+ * dropped CQE.
+ */
+ if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
+ return -EBADR;
+ }
/*
* Don't enter poll loop if we already have events pending.
* If we do, we can potentially be spinning for commands that
* already triggered a CQE (eg in error).
*/
- check_cq = READ_ONCE(ctx->check_cq);
- if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
- __io_cqring_overflow_flush(ctx, false);
if (io_cqring_events(ctx))
return 0;
- /*
- * Similarly do not spin if we have not informed the user of any
- * dropped CQE.
- */
- if (unlikely(check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)))
- return -EBADR;
-
do {
/*
* If a submit got punted to a workqueue, we can have the
@@ -2203,12 +2204,15 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
ret = io_run_task_work_sig();
if (ret || io_should_wake(iowq))
return ret;
+
check_cq = READ_ONCE(ctx->check_cq);
- /* let the caller flush overflows, retry */
- if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
- return 1;
- if (unlikely(check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)))
- return -EBADR;
+ if (unlikely(check_cq)) {
+ /* let the caller flush overflows, retry */
+ if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
+ return 1;
+ if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
+ return -EBADR;
+ }
if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
return -ETIME;
return 1;