// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	/* ring mapped provided buffers */
	__u8 is_mapped;
	/* ring mapped provided buffers, but mmap'ed by application */
	__u8 is_mmap;
	/* bl is visible from an RCU point of view for lookup */
	__u8 is_ready;
};

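/* one legacy provided buffer, linked via ->list and identified by bid/bgid */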
struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

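/*
 * Buffer selection and provided-buffer management: teardown, the
 * PROVIDE/REMOVE_BUFFERS opcode handlers and pbuf ring (un)registration.
 */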
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);

void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io; in that case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~REQ_F_BUFFER_RING;
		return true;
	}
	return false;
}

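/*
 * Returns true if the request wants a selected buffer but hasn't had one
 * picked yet, i.e. neither a legacy nor a ring provided buffer is attached.
 */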
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

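/*
 * Put a selected buffer back so it can be handed out again, dispatching to
 * the legacy or ring variant depending on how it was selected. Returns
 * false without recycling if REQ_F_BL_NO_RECYCLE is set, since partial IO
 * has already consumed part of the buffer.
 */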
static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}

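/*
 * Consume a ring provided buffer: bump the ring head past it and clear
 * REQ_F_BUFFER_RING. req->buf_index is restored to the buffer group ID so
 * that any later buffer selection for this request targets the same group.
 */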
static inline void __io_put_kbuf_ring(struct io_kiocb *req)
{
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->buf_list->head++;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
}

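/*
 * Release the buffer attached to a request: ring provided buffers are
 * committed in place, while legacy provided buffers are moved onto the
 * given list for reuse, with req->buf_index restored to the group ID.
 */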
static inline void __io_put_kbuf_list(struct io_kiocb *req,
				      struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		__io_put_kbuf_ring(req);
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}
}

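/*
 * Variant of io_put_kbuf() for when the completion lock is already held:
 * builds the CQE flags from the selected buffer ID and returns any legacy
 * buffer to the ctx->io_buffers_comp list.
 */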
static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	unsigned int ret;

	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	__io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
	return ret;
}

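/*
 * Release the request's selected buffer and return the CQE flags word:
 * IORING_CQE_F_BUFFER plus the buffer ID shifted by IORING_CQE_BUFFER_SHIFT.
 * The flags must be computed from req->buf_index before the buffer is put,
 * as putting a ring buffer rewrites buf_index back to the group ID.
 */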
static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	unsigned int ret;

	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	if (req->flags & REQ_F_BUFFER_RING)
		__io_put_kbuf_ring(req);
	else
		__io_put_kbuf(req, issue_flags);
	return ret;
}
#endif