io_uring: kill dead code in io_req_complete_post
authorMing Lei <ming.lei@redhat.com>
Fri, 5 Apr 2024 15:50:02 +0000 (16:50 +0100)
committerJens Axboe <axboe@kernel.dk>
Mon, 15 Apr 2024 14:10:26 +0000 (08:10 -0600)
Since commit 8f6c829491fe ("io_uring: remove struct io_tw_state::locked"),
io_req_complete_post() is only called from io-wq submit work, where the
request reference is guaranteed to be held, so it cannot drop to zero
inside io_req_complete_post().

Kill the dead code, and add req_ref_put() to drop the reference instead.

Cc: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1d8297e2046553153e763a52574f0e0f4d512f86.1712331455.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/io_uring.c
io_uring/refs.h

index cc5fa4e1b34461f1e7b254547bd8413c1dc8e48b..8bd5db2056eee61d922ec309c864419414aaef8d 100644 (file)
@@ -928,7 +928,6 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       struct io_rsrc_node *rsrc_node = NULL;
 
        /*
         * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
@@ -945,42 +944,10 @@ static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
                if (!io_fill_cqe_req(ctx, req))
                        io_req_cqe_overflow(req);
        }
-
-       /*
-        * If we're the last reference to this request, add to our locked
-        * free_list cache.
-        */
-       if (req_ref_put_and_test(req)) {
-               if (req->flags & IO_REQ_LINK_FLAGS) {
-                       if (req->flags & IO_DISARM_MASK)
-                               io_disarm_next(req);
-                       if (req->link) {
-                               io_req_task_queue(req->link);
-                               req->link = NULL;
-                       }
-               }
-               io_put_kbuf_comp(req);
-               if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
-                       io_clean_op(req);
-               io_put_file(req);
-
-               rsrc_node = req->rsrc_node;
-               /*
-                * Selected buffer deallocation in io_clean_op() assumes that
-                * we don't hold ->completion_lock. Clean them here to avoid
-                * deadlocks.
-                */
-               io_put_task_remote(req->task);
-               wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
-               ctx->locked_free_nr++;
-       }
        io_cq_unlock_post(ctx);
 
-       if (rsrc_node) {
-               io_ring_submit_lock(ctx, issue_flags);
-               io_put_rsrc_node(ctx, rsrc_node);
-               io_ring_submit_unlock(ctx, issue_flags);
-       }
+       /* called from io-wq submit work only, the ref won't drop to zero */
+       req_ref_put(req);
 }
 
 void io_req_defer_failed(struct io_kiocb *req, s32 res)
index 1336de3f2a30aa26cc51a6bd2b8dc67a9d0e5fe9..63982ead9f7dabcfbdbab74777729816647ff368 100644 (file)
@@ -33,6 +33,13 @@ static inline void req_ref_get(struct io_kiocb *req)
        atomic_inc(&req->refs);
 }
 
+static inline void req_ref_put(struct io_kiocb *req)
+{
+       WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
+       WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+       atomic_dec(&req->refs);
+}
+
 static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
 {
        if (!(req->flags & REQ_F_REFCOUNT)) {
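For context, below is a minimal userspace sketch of the refcounting contract
this patch relies on. It uses C11 atomics in place of the kernel's atomic_t,
and the types, initial values and main() are simplified stand-ins invented
for illustration, not the kernel definitions. The idea it models: io-wq pins
the request with its own reference before issuing it, so on entry to
io_req_complete_post() the count is at least 2 and a plain put can never
free the request.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct io_kiocb {
	atomic_int refs;		/* stand-in for the kernel's atomic_t */
};

/* Unconditional drop: only legal while another reference is held. */
static void req_ref_put(struct io_kiocb *req)
{
	int old = atomic_fetch_sub(&req->refs, 1);

	assert(old > 1);		/* mirrors the WARN_ON_ONCE() above */
}

/* Drop a reference and report whether it was the last one. */
static bool req_ref_put_and_test(struct io_kiocb *req)
{
	return atomic_fetch_sub(&req->refs, 1) == 1;
}

/*
 * Models the patched io_req_complete_post(): refs >= 2 on entry, so a
 * plain put is enough and the old free-the-request path was unreachable.
 */
static void io_req_complete_post(struct io_kiocb *req)
{
	req_ref_put(req);
}

/* Models io-wq submit work: pin the request, then issue it. */
static void io_wq_submit_work(struct io_kiocb *req)
{
	bool last;

	atomic_init(&req->refs, 2);	/* one ref for io-wq, one to complete */
	io_req_complete_post(req);	/* drops the completion reference */

	last = req_ref_put_and_test(req);	/* io-wq drops its own ref */
	assert(last);			/* only this put can reach zero */
}

int main(void)
{
	struct io_kiocb req;

	io_wq_submit_work(&req);
	return 0;
}

With the worker's reference pinned, the completion path only needs an
atomic decrement, and the WARN_ON_ONCE() checks in the real req_ref_put()
catch any caller that violates this contract.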