io_uring/kbuf: move locking into io_kbuf_drop()
author Pavel Begunkov <asml.silence@gmail.com>
Wed, 5 Feb 2025 11:36:44 +0000 (11:36 +0000)
committer Jens Axboe <axboe@kernel.dk>
Mon, 17 Feb 2025 12:34:45 +0000 (05:34 -0700)
Move the burden of locking out of the caller and into io_kbuf_drop(),
which will help with further refactoring.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/530f0cf1f06963029399f819a9a58b1a34bebef3.1738724373.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/io_uring.c
io_uring/kbuf.h

index 6fa1e88e40fbe44474bf837fe28b3ec2069775a7..ed7c9081352a48b3ae4bad13d663668e07b9f352 100644
@@ -398,11 +398,8 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
 
 static void io_clean_op(struct io_kiocb *req)
 {
-       if (req->flags & REQ_F_BUFFER_SELECTED) {
-               spin_lock(&req->ctx->completion_lock);
+       if (unlikely(req->flags & REQ_F_BUFFER_SELECTED))
                io_kbuf_drop(req);
-               spin_unlock(&req->ctx->completion_lock);
-       }
 
        if (req->flags & REQ_F_NEED_CLEANUP) {
                const struct io_cold_def *def = &io_cold_defs[req->opcode];
index bd80c44c5af1e9ca955e766c26c6f39239163452..310f94a0727a6f1bcdbce88c058edf835d808e44 100644
@@ -174,13 +174,13 @@ static inline void __io_put_kbuf_list(struct io_kiocb *req, int len,
 
 static inline void io_kbuf_drop(struct io_kiocb *req)
 {
-       lockdep_assert_held(&req->ctx->completion_lock);
-
        if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
                return;
 
+       spin_lock(&req->ctx->completion_lock);
        /* len == 0 is fine here, non-ring will always drop all of it */
        __io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
+       spin_unlock(&req->ctx->completion_lock);
 }
 
 static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len,
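
For reference, a sketch of how the two touched functions read once the hunks above are applied; code outside the hunks is elided and assumed unchanged.

/* io_uring/io_uring.c: the caller no longer takes the completion lock */
static void io_clean_op(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_BUFFER_SELECTED))
		io_kbuf_drop(req);

	/* ... remainder of io_clean_op() unchanged ... */
}

/* io_uring/kbuf.h: locking now sits next to the list it protects */
static inline void io_kbuf_drop(struct io_kiocb *req)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;

	spin_lock(&req->ctx->completion_lock);
	/* len == 0 is fine here, non-ring will always drop all of it */
	__io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
	spin_unlock(&req->ctx->completion_lock);
}

The lockdep_assert_held() is dropped because the helper now takes the lock itself, and the caller-side check is marked unlikely(), so the common no-buffer path skips both the call and the lock.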