io_uring/kbuf: cleanup passing back cflags
author		Jens Axboe <axboe@kernel.dk>
Tue, 30 Jan 2024 03:59:18 +0000 (20:59 -0700)
committer	Jens Axboe <axboe@kernel.dk>
Thu, 8 Feb 2024 20:27:06 +0000 (13:27 -0700)
We have various functions calculating the CQE cflags we need to pass
back, but the calculation is the same everywhere. Make a number of the
putting functions void, and have the two main helpers for this,
io_put_kbuf() and io_put_kbuf_comp(), calculate the actual mask and
pass it back.

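Concretely, the mask both helpers now compute is the same value that
__io_put_kbuf_list() used to return (sketch, matching the hunks below):

	/* tell the CQE consumer which provided buffer ID was used */
	cflags = IORING_CQE_F_BUFFER |
		 (req->buf_index << IORING_CQE_BUFFER_SHIFT);
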
While at it, clean up how we put REQ_F_BUFFER_RING buffers. Before
this change, we would call into __io_put_kbuf() only to bounce right
back into the header-defined functions. Since clearing this type of
buffer just means re-assigning the buf_index and incrementing the
head, that round trip is wasteful.

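For the ring-provided case, putting the buffer now reduces to the new
__io_put_kbuf_ring() helper; in essence (sketch taken from the kbuf.h
hunk below):

	/* restore the buffer group id and consume one ring entry */
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->buf_list->head++;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
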
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/kbuf.c
io_uring/kbuf.h

diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 71880615bb7881d5cf3527f4dd4ab42a03dc53b5..ee866d646997932ebf0bba236ccfb7761b26e8ea 100644
@@ -102,10 +102,8 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
        return true;
 }
 
-unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
+void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
 {
-       unsigned int cflags;
-
        /*
         * We can add this buffer back to two lists:
         *
@@ -118,21 +116,17 @@ unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
         * We migrate buffers from the comp_list to the issue cache list
         * when we need one.
         */
-       if (req->flags & REQ_F_BUFFER_RING) {
-               /* no buffers to recycle for this case */
-               cflags = __io_put_kbuf_list(req, NULL);
-       } else if (issue_flags & IO_URING_F_UNLOCKED) {
+       if (issue_flags & IO_URING_F_UNLOCKED) {
                struct io_ring_ctx *ctx = req->ctx;
 
                spin_lock(&ctx->completion_lock);
-               cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
+               __io_put_kbuf_list(req, &ctx->io_buffers_comp);
                spin_unlock(&ctx->completion_lock);
        } else {
                lockdep_assert_held(&req->ctx->uring_lock);
 
-               cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
+               __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
        }
-       return cflags;
 }
 
 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 53dfaa71a397cedd9b52923a3f0178873aaf9960..f74c910b83f40035b988e875388245a8b46560e3 100644
@@ -57,7 +57,7 @@ int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
 
 void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);
 
-unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
+void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
 
 bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
 
@@ -108,41 +108,54 @@ static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
        return false;
 }
 
-static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
-                                             struct list_head *list)
+static inline void __io_put_kbuf_ring(struct io_kiocb *req)
 {
-       unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
+       if (req->buf_list) {
+               req->buf_index = req->buf_list->bgid;
+               req->buf_list->head++;
+       }
+       req->flags &= ~REQ_F_BUFFER_RING;
+}
 
+static inline void __io_put_kbuf_list(struct io_kiocb *req,
+                                     struct list_head *list)
+{
        if (req->flags & REQ_F_BUFFER_RING) {
-               if (req->buf_list) {
-                       req->buf_index = req->buf_list->bgid;
-                       req->buf_list->head++;
-               }
-               req->flags &= ~REQ_F_BUFFER_RING;
+               __io_put_kbuf_ring(req);
        } else {
                req->buf_index = req->kbuf->bgid;
                list_add(&req->kbuf->list, list);
                req->flags &= ~REQ_F_BUFFER_SELECTED;
        }
-
-       return ret;
 }
 
 static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
 {
+       unsigned int ret;
+
        lockdep_assert_held(&req->ctx->completion_lock);
 
        if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
                return 0;
-       return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
+
+       ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
+       __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
+       return ret;
 }
 
 static inline unsigned int io_put_kbuf(struct io_kiocb *req,
                                       unsigned issue_flags)
 {
+       unsigned int ret;
 
-       if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
+       if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
                return 0;
-       return __io_put_kbuf(req, issue_flags);
+
+       ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
+       if (req->flags & REQ_F_BUFFER_RING)
+               __io_put_kbuf_ring(req);
+       else
+               __io_put_kbuf(req, issue_flags);
+       return ret;
 }
 #endif
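
Callers are unaffected by this cleanup: completion paths keep passing
the helper's return value straight through as the CQE cflags. An
illustrative example of that existing call pattern (not part of this
diff; io_req_set_res() assumed as declared in io_uring/io_uring.h):

	/* post the request's result along with the buffer cflags */
	io_req_set_res(req, ret, io_put_kbuf(req, issue_flags));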