// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	/* ring mapped provided buffers */
	__u8 is_buf_ring;
	/* ring mapped provided buffers, but mmap'ed by application */
	__u8 is_mmap;
	/* bl is visible from an RCU point of view for lookup */
	__u8 is_ready;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};
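
/*
 * Sketch for illustration only (not part of this header): how a lookup
 * path might branch on the union above. ->is_buf_ring says which union
 * member is live; the surrounding code here is hypothetical.
 *
 *	if (bl->is_buf_ring) {
 *		// ring provided: index the shared ring via head & mask
 *		struct io_uring_buf *buf = &bl->buf_ring->bufs[bl->head & bl->mask];
 *	} else {
 *		// classic provided: pop an io_buffer off ->buf_list
 *		struct io_buffer *kbuf = list_first_entry(&bl->buf_list,
 *							  struct io_buffer, list);
 *	}
 */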

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
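
/*
 * For reference, the userspace half of io_register_pbuf_ring() (a sketch,
 * not kernel code; ring_fd and ring_mem are placeholders). The ring is
 * described by struct io_uring_buf_reg from the uapi header and registered
 * through the io_uring_register(2) syscall:
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr	= (unsigned long) ring_mem,	// page-aligned memory
 *		.ring_entries	= 8,				// power of two
 *		.bgid		= 0,				// buffer group ID
 *	};
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_PBUF_RING, &reg, 1);
 */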

void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);

void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io; in that case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~REQ_F_BUFFER_RING;
		return true;
	}
	return false;
}

/* true if this request still needs to pick a provided buffer at issue time */
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}
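
/*
 * Typical caller pattern (sketch; the op handler below is hypothetical):
 * if an op selected a buffer but has to retry without having transferred
 * any data, recycle the buffer first so it can be handed out again:
 *
 *	ret = do_some_op(req, issue_flags);
 *	if (ret == -EAGAIN) {
 *		io_kbuf_recycle(req, issue_flags);
 *		return -EAGAIN;
 *	}
 */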

/* consume a ring provided buffer by advancing the ring head */
static inline void __io_put_kbuf_ring(struct io_kiocb *req)
{
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->buf_list->head++;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
}

static inline void __io_put_kbuf_list(struct io_kiocb *req,
				      struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		__io_put_kbuf_ring(req);
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}
}

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	unsigned int ret;

	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	__io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
	return ret;
}
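
/*
 * The value returned by io_put_kbuf_comp()/io_put_kbuf() is OR'ed into
 * cqe->flags, so userspace recovers the consumed buffer ID like this
 * (sketch):
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */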

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	unsigned int ret;

	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	if (req->flags & REQ_F_BUFFER_RING)
		__io_put_kbuf_ring(req);
	else
		__io_put_kbuf(req, issue_flags);
	return ret;
}
#endif