// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};
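
/*
 * Illustration only (not part of the original header): the "classic"
 * provided-buffer mode tracked by ->buf_list above is fed from userspace
 * with IORING_OP_PROVIDE_BUFFERS. A minimal liburing-style sketch, assuming
 * a liburing build that provides io_uring_prep_provide_buffers():
 *
 *	char bufs[8][4096];
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// publish 8 buffers of 4096 bytes each in group 0, starting at bid 0
 *	io_uring_prep_provide_buffers(sqe, bufs, 4096, 8, 0, 0);
 *	io_uring_submit(&ring);
 *
 * Ring-mapped groups (->buf_ring above) are registered via
 * IORING_REGISTER_PBUF_RING instead; see the sketch after the prototypes
 * below.
 */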

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
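
/*
 * Illustration only: io_register_pbuf_ring()/io_unregister_pbuf_ring()
 * back the IORING_REGISTER_PBUF_RING/IORING_UNREGISTER_PBUF_RING opcodes.
 * A rough sketch of the userspace register side (via the io_uring_register(2)
 * syscall; how ring_mem is allocated is up to the application and is an
 * assumption here):
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr	= (unsigned long) ring_mem,	// page aligned
 *		.ring_entries	= 8,				// power of two
 *		.bgid		= 0,
 *	};
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_PBUF_RING, &reg, 1);
 *
 * Buffers are then handed to the kernel by filling struct io_uring_buf
 * slots in ring_mem and advancing the ring tail.
 */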

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
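
/*
 * REQ_F_BUFFER_SELECT mirrors IOSQE_BUFFER_SELECT from the SQE. For
 * reference, the userspace side that makes io_do_buffer_select() return
 * true looks roughly like this (liburing-style sketch, names assumed):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_recv(sqe, sockfd, NULL, 4096, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;	// let the kernel pick a buffer
 *	sqe->buf_group = 0;			// from provided-buffer group 0
 */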

static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;
	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if ((req->flags & REQ_F_BUFFER_SELECTED) &&
	    (req->flags & REQ_F_PARTIAL_IO))
		return;

	/*
	 * READV uses fields in `struct io_rw` (len/addr) to stash the selected
	 * buffer data. However, if that buffer is recycled the original request
	 * data stored in addr is lost. Therefore forbid recycling for now.
	 */
	if (req->opcode == IORING_OP_READV)
		return;

	__io_kbuf_recycle(req, issue_flags);
}

static inline unsigned int __io_put_kbuf(struct io_kiocb *req,
					 struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list)
			req->buf_list->head++;
		req->flags &= ~REQ_F_BUFFER_RING;
	} else {
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	return IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
}
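
/*
 * The cflags value built above is what the completion carries back to
 * userspace. A consumer decodes the selected buffer ID from the CQE like
 * so (sketch):
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER) {
 *		unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		// cqe->res bytes of data landed in the buffer with this bid
 *	}
 */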

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	unsigned int cflags;

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;

	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		/* no buffers to recycle for this case */
		cflags = __io_put_kbuf(req, NULL);
	} else if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache);
	}

	return cflags;
}
#endif