// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	atomic_t refs;

	/* ring mapped provided buffers */
	__u8 is_buf_ring;
	/* ring mapped provided buffers, but mmap'ed by application */
	__u8 is_mmap;
};

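/* a single legacy (classic) provided buffer, queued on its group's free list */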
struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

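/*
 * Select a buffer for the request from its buffer group, trimming *len
 * to the size of the chosen buffer; returns its userspace address.
 */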
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

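/*
 * prep and issue handlers for the IORING_OP_REMOVE_BUFFERS and
 * IORING_OP_PROVIDE_BUFFERS opcodes
 */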
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

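/* io_uring_register() entry points for ring provided buffer management */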
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

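/* buffer group lookup, reference dropping, and mmap support */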
void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid);
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io: in that case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~REQ_F_BUFFER_RING;
		return true;
	}
	return false;
}

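/* true if the request wants a buffer selected, but doesn't have one yet */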
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

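/*
 * Return an unused selected buffer to its group so it can be handed out
 * to another request, unless this request forbids recycling.
 */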
static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}

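/* commit consumption of a ring provided buffer by bumping the ring head */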
static inline void __io_put_kbuf_ring(struct io_kiocb *req)
{
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->buf_list->head++;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
}

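/*
 * Release a request's buffer: ring provided buffers are consumed in
 * place, legacy provided buffers go back on the given free list.
 */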
static inline void __io_put_kbuf_list(struct io_kiocb *req,
				      struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		__io_put_kbuf_ring(req);
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}
}

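/* drop a request's buffer at completion time, under ->completion_lock */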
static inline void io_kbuf_drop(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;

	__io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

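/*
 * Release the request's buffer and return the IORING_CQE_F_BUFFER flags
 * encoding the consumed buffer ID for the CQE, or 0 if no buffer was held.
 */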
static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	unsigned int ret;

	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	if (req->flags & REQ_F_BUFFER_RING)
		__io_put_kbuf_ring(req);
	else
		__io_put_kbuf(req, issue_flags);
	return ret;
}
#endif