// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	atomic_t refs;

	/* ring mapped provided buffers */
	__u8 is_buf_ring;
	/* ring mapped provided buffers, but mmap'ed by application */
	__u8 is_mmap;
};
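/*
 * Annotation (added for clarity; not in the original source): for ring
 * provided buffers, nr_entries is a power of two and mask == nr_entries - 1,
 * so the next candidate entry can conceptually be found with a cheap
 * wrap-around along the lines of:
 *
 *	struct io_uring_buf *buf = &bl->buf_ring->bufs[bl->head & bl->mask];
 *
 * The application publishes buffers by advancing the shared ring tail; the
 * kernel only ever advances this private head when buffers are committed.
 */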

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};
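/*
 * Annotation (added; not in the original source): io_buffer describes a
 * single classic provided buffer, queued on a per-group free list. The bid
 * is what gets encoded back into cqe->flags on completion; bgid names the
 * buffer group the buffer was provided under.
 */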

enum {
	/* can alloc a bigger vec */
	KBUF_MODE_EXPAND = 1,
	/* if bigger vec allocated, free old one */
	KBUF_MODE_FREE = 2,
};

struct buf_sel_arg {
	struct iovec *iovs;
	size_t out_len;
	size_t max_len;
	int nr_iovs;
	int mode;
};
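/*
 * Usage sketch (added; hypothetical caller, not from this file): a user of
 * the multi-buffer selection API typically starts with an on-stack vec and
 * lets the helpers grow it on demand:
 *
 *	struct iovec fast_iov[8];
 *	struct buf_sel_arg arg = {
 *		.iovs		= fast_iov,
 *		.nr_iovs	= ARRAY_SIZE(fast_iov),
 *		.max_len	= INT_MAX,
 *		.mode		= KBUF_MODE_EXPAND | KBUF_MODE_FREE,
 *	};
 *	int ret = io_buffers_select(req, &arg, issue_flags);
 *
 * On success, arg.iovs holds the filled iovecs (possibly a freshly
 * allocated array if KBUF_MODE_EXPAND kicked in) and out_len totals the
 * selected bytes.
 */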

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
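/*
 * Annotation (added; not in the original source): the prep/issue pairs
 * above back the IORING_OP_PROVIDE_BUFFERS and IORING_OP_REMOVE_BUFFERS
 * opcodes, while the register helpers implement IORING_REGISTER_PBUF_RING,
 * IORING_UNREGISTER_PBUF_RING and IORING_REGISTER_PBUF_STATUS.
 */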

void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid);
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io: in that case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}
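/*
 * Annotation (added; not in the original source): req->buf_index doubles as
 * the buffer group ID before selection and the buffer ID afterwards, which
 * is why recycling must restore it to bl->bgid so that a retry selects from
 * the correct group again.
 */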

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
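/*
 * Annotation (added; not in the original source): true only when the
 * request opted into buffer selection (IOSQE_BUFFER_SELECT) and neither a
 * classic nor a ring buffer has been picked for it yet.
 */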

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}
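/*
 * Annotation (added; not in the original source): REQ_F_BL_NO_RECYCLE is
 * set once a request has partially consumed a buffer (e.g. a short
 * transfer); such a buffer must be reported as used rather than returned
 * to the pool.
 */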

static inline void __io_put_kbuf_ring(struct io_kiocb *req, int nr)
{
	struct io_buffer_list *bl = req->buf_list;

	if (bl) {
		if (req->flags & REQ_F_BUFFERS_COMMIT) {
			bl->head += nr;
			req->flags &= ~REQ_F_BUFFERS_COMMIT;
		}
		req->buf_index = bl->bgid;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
}
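/*
 * Annotation (added; not in the original source): REQ_F_BUFFERS_COMMIT
 * marks ring buffers handed to the request but not yet consumed from the
 * ring; advancing bl->head here is what finally retires those nr entries.
 */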

static inline void __io_put_kbuf_list(struct io_kiocb *req,
				      struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		__io_put_kbuf_ring(req, 1);
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}
}

static inline void io_kbuf_drop(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;

	__io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}
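/*
 * Annotation (added; not in the original source): io_kbuf_drop() runs at
 * completion time with the ring's completion_lock held, so legacy buffers
 * can be parked on ctx->io_buffers_comp for later reuse without extra
 * locking.
 */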

static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int nbufs,
					  unsigned issue_flags)
{
	unsigned int ret;

	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	if (req->flags & REQ_F_BUFFER_RING)
		__io_put_kbuf_ring(req, nbufs);
	else
		__io_put_kbuf(req, issue_flags);
	return ret;
}
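/*
 * Decode sketch (added; not from this file): the value returned above is
 * meant for cqe->flags, so userspace recovers the consumed buffer ID with
 * something like:
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */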

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	return __io_put_kbufs(req, 1, issue_flags);
}

static inline unsigned int io_put_kbufs(struct io_kiocb *req, int nbufs,
					unsigned issue_flags)
{
	return __io_put_kbufs(req, nbufs, issue_flags);
}
#endif