// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

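/*
 * Core provided-buffer helpers: io_buffer_select() hands a selected buffer
 * to the request, __io_kbuf_recycle() is the slow path that returns an
 * unused buffer to its group, and io_destroy_buffers() frees all provided
 * buffers when the ring is torn down.
 */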
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

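/* prep/issue handlers for IORING_OP_REMOVE_BUFFERS */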
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

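/* prep/issue handlers for IORING_OP_PROVIDE_BUFFERS */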
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

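/* register/unregister a ring-mapped buffer ring (IORING_(UN)REGISTER_PBUF_RING) */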
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);

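/*
 * True if this request wants buffer selection (REQ_F_BUFFER_SELECT) and has
 * not yet picked one, i.e. neither the legacy nor the ring-mapped variant
 * has been selected for it.
 */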
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

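/* hand a selected but unused buffer back so it can be picked again */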
static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;
	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if ((req->flags & REQ_F_BUFFER_SELECTED) &&
	    (req->flags & REQ_F_PARTIAL_IO))
		return;

	/*
	 * READV uses fields in `struct io_rw` (len/addr) to stash the selected
	 * buffer data. However if that buffer is recycled the original request
	 * data stored in addr is lost. Therefore forbid recycling for now.
	 */
	if (req->opcode == IORING_OP_READV)
		return;

	__io_kbuf_recycle(req, issue_flags);
}

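/*
 * Drop the request's buffer: for ring provided buffers the ring head is
 * advanced, for legacy provided buffers the buffer is put back on @list.
 * Returns the CQE flags that encode the consumed buffer ID.
 */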
static inline unsigned int __io_put_kbuf(struct io_kiocb *req,
					 struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list)
			req->buf_list->head++;
		req->flags &= ~REQ_F_BUFFER_RING;
	} else {
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	return IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
}

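/* put the buffer when the caller already holds ->completion_lock */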
static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
}

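/*
 * Put the request's buffer from the issue or completion path, choosing the
 * right list and locking based on whether ->uring_lock is held (issue_flags).
 */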
static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	unsigned int cflags;

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;

	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		/* no buffers to recycle for this case */
		cflags = __io_put_kbuf(req, NULL);
	} else if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache);
	}

	return cflags;
}
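
/*
 * Illustrative sketch only (not part of this header): the pattern an opcode
 * handler typically follows when pairing buffer selection with the final
 * buffer put. The do_the_io() step below is hypothetical; see the real
 * read/recv handlers for actual callers.
 *
 *	if (io_do_buffer_select(req)) {
 *		buf = io_buffer_select(req, &len, issue_flags);
 *		if (!buf)
 *			return -ENOBUFS;
 *	}
 *	ret = do_the_io(buf, len);
 *	cflags = io_put_kbuf(req, issue_flags);
 *	(cflags carries IORING_CQE_F_BUFFER plus the buffer ID for the CQE)
 */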
#endif