From 7bfa9badc793ba5e8b530dc3a3cb092a1c22685b Mon Sep 17 00:00:00 2001
From: Pavel Begunkov <asml.silence@gmail.com>
Date: Fri, 15 Apr 2022 22:08:28 +0100
Subject: [PATCH 1/1] io_uring: refactor io_queue_sqe()

io_queue_sqe() is a part of the submission path and we try hard to keep
it inlined, so shed some extra bytes from it by moving the error
checking part into io_queue_sqe_arm_apoll() and renaming it accordingly.

note: io_queue_sqe_arm_apoll() is not inlined, thus the patch doesn't
change the number of function calls for the apoll path.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/9b79edd246336decfaca79b949a15ac69123490d.1650056133.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 fs/io_uring.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5d3f1b21ee76..5a99a2fd8d2d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -7508,10 +7508,17 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
 	io_put_req(req);
 }
 
-static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
+static void io_queue_async(struct io_kiocb *req, int ret)
 	__must_hold(&req->ctx->uring_lock)
 {
-	struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
+	struct io_kiocb *linked_timeout;
+
+	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
+		io_req_complete_failed(req, ret);
+		return;
+	}
+
+	linked_timeout = io_prep_linked_timeout(req);
 
 	switch (io_arm_poll_handler(req, 0)) {
 	case IO_APOLL_READY:
@@ -7547,13 +7554,10 @@ static inline void io_queue_sqe(struct io_kiocb *req)
 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
 	 * doesn't support non-blocking read/write attempts
 	 */
-	if (likely(!ret)) {
+	if (likely(!ret))
 		io_arm_ltimeout(req);
-	} else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
-		io_queue_sqe_arm_apoll(req);
-	} else {
-		io_req_complete_failed(req, ret);
-	}
+	else
+		io_queue_async(req, ret);
 }
 
 static void io_queue_sqe_fallback(struct io_kiocb *req)
-- 
2.25.1