// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"

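/*
 * Create an io-wq instance to offload work for @task. The hash map used
 * for hashed work is shared by all tasks attached to this ring, so it is
 * allocated lazily here under ->uring_lock and reused afterwards.
 */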
static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
					struct task_struct *task)
{
	struct io_wq_hash *hash;
	struct io_wq_data data;
	unsigned int concurrency;

	mutex_lock(&ctx->uring_lock);
	hash = ctx->hash_map;
	if (!hash) {
		hash = kzalloc(sizeof(*hash), GFP_KERNEL);
		if (!hash) {
			mutex_unlock(&ctx->uring_lock);
			return ERR_PTR(-ENOMEM);
		}
		refcount_set(&hash->refs, 1);
		init_waitqueue_head(&hash->wait);
		ctx->hash_map = hash;
	}
	mutex_unlock(&ctx->uring_lock);

	data.hash = hash;
	data.task = task;
	data.free_work = io_wq_free_work;
	data.do_work = io_wq_submit_work;

	/* Do QD, or 4 * CPUS, whichever is smaller */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

	return io_wq_create(concurrency, &data);
}

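/*
 * Free the task's io_uring context. At this point all ring mappings must
 * already be gone, the io-wq shut down, and no cached refs left over.
 */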
void __io_uring_free(struct task_struct *tsk)
{
	struct io_uring_task *tctx = tsk->io_uring;

	WARN_ON_ONCE(!xa_empty(&tctx->xa));
	WARN_ON_ONCE(tctx->io_wq);
	WARN_ON_ONCE(tctx->cached_refs);

	percpu_counter_destroy(&tctx->inflight);
	kfree(tctx);
	tsk->io_uring = NULL;
}

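/*
 * Allocate and set up the per-task io_uring context: the inflight
 * counter, the ctx mapping xarray, and an io-wq instance for offload.
 */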
__cold int io_uring_alloc_task_context(struct task_struct *task,
				       struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	int ret;

	tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
	if (unlikely(!tctx))
		return -ENOMEM;

	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
	if (unlikely(ret)) {
		kfree(tctx);
		return ret;
	}

	tctx->io_wq = io_init_wq_offload(ctx, task);
	if (IS_ERR(tctx->io_wq)) {
		ret = PTR_ERR(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		return ret;
	}

	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	atomic_set(&tctx->in_cancel, 0);
	atomic_set(&tctx->inflight_tracked, 0);
	task->io_uring = tctx;
	init_llist_head(&tctx->task_list);
	init_task_work(&tctx->task_work, tctx_task_work);
	return 0;
}

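/*
 * Ensure the current task has an io_uring context, and that @ctx is
 * present in its xarray of attached rings. Newly added nodes are also
 * linked into the ring's tctx_list under ->uring_lock.
 */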
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;
	int ret;

	if (unlikely(!tctx)) {
		ret = io_uring_alloc_task_context(current, ctx);
		if (unlikely(ret))
			return ret;

		tctx = current->io_uring;
		if (ctx->iowq_limits_set) {
			unsigned int limits[2] = { ctx->iowq_limits[0],
						   ctx->iowq_limits[1], };

			ret = io_wq_max_workers(tctx->io_wq, limits);
			if (ret)
				return ret;
		}
	}
	if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;
		node->ctx = ctx;
		node->task = current;

		ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
					node, GFP_KERNEL));
		if (ret) {
			kfree(node);
			return ret;
		}

		mutex_lock(&ctx->uring_lock);
		list_add(&node->ctx_node, &ctx->tctx_list);
		mutex_unlock(&ctx->uring_lock);
	}
	return 0;
}

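/*
 * Submission-path variant: with IORING_SETUP_SINGLE_ISSUER, only the
 * designated submitter task may attach; anyone else gets -EEXIST.
 */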
int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)
{
	int ret;

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
	    && ctx->submitter_task != current)
		return -EEXIST;

	ret = __io_uring_add_tctx_node(ctx);
	if (ret)
		return ret;

	current->io_uring->last = ctx;
	return 0;
}

/*
 * Remove this io_uring_file -> task mapping.
 */
__cold void io_uring_del_tctx_node(unsigned long index)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;

	if (!tctx)
		return;
	node = xa_erase(&tctx->xa, index);
	if (!node)
		return;

	WARN_ON_ONCE(current != node->task);
	WARN_ON_ONCE(list_empty(&node->ctx_node));

	mutex_lock(&node->ctx->uring_lock);
	list_del(&node->ctx_node);
	mutex_unlock(&node->ctx->uring_lock);

	if (tctx->last == node->ctx)
		tctx->last = NULL;
	kfree(node);
}

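/*
 * Drop all ring mappings for this task and tear down its io-wq.
 */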
__cold void io_uring_clean_tctx(struct io_uring_task *tctx)
{
	struct io_wq *wq = tctx->io_wq;
	struct io_tctx_node *node;
	unsigned long index;

	xa_for_each(&tctx->xa, index, node) {
		io_uring_del_tctx_node(index);
		cond_resched();
	}
	if (wq) {
		/*
		 * Must be after io_uring_del_tctx_node() (removes nodes under
		 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
		 */
		io_wq_put_and_exit(wq);
		tctx->io_wq = NULL;
	}
}

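/*
 * Drop every ring file reference this task has registered, freeing up
 * all slots in the registered_rings array.
 */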
void io_uring_unreg_ringfd(void)
{
	struct io_uring_task *tctx = current->io_uring;
	int i;

	for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
		if (tctx->registered_rings[i]) {
			fput(tctx->registered_rings[i]);
			tctx->registered_rings[i] = NULL;
		}
	}
}

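/*
 * Install @file in the first free slot in [start, end) of the task's
 * registered ring array. Returns the slot index used, or -EBUSY if the
 * whole range is occupied.
 */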
int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end)
{
	int offset;

	for (offset = start; offset < end; offset++) {
		offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[offset])
			continue;

		tctx->registered_rings[offset] = file;
		return offset;
	}
	return -EBUSY;
}

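/*
 * Look up @fd, check that it is an io_uring file, and register it. The
 * reference from fget() is retained on success and dropped on failure.
 */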
static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
				     int start, int end)
{
	struct file *file;
	int offset;

	file = fget(fd);
	if (!file) {
		return -EBADF;
	} else if (!io_is_uring_fops(file)) {
		fput(file);
		return -EOPNOTSUPP;
	}
	offset = io_ring_add_registered_file(tctx, file, start, end);
	if (offset < 0)
		fput(file);
	return offset;
}

/*
 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
 * invocation. User passes in an array of struct io_uring_rsrc_update
 * with ->data set to the ring_fd, and ->offset given for the desired
 * index. If no index is desired, application may set ->offset == -1U
 * and we'll find an available index. Returns number of entries
 * successfully processed, or < 0 on error if none were processed.
 */
int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
		       unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_rsrc_update reg;
	struct io_uring_task *tctx;
	int ret, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;

	mutex_unlock(&ctx->uring_lock);
	ret = __io_uring_add_tctx_node(ctx);
	mutex_lock(&ctx->uring_lock);
	if (ret)
		return ret;

	tctx = current->io_uring;
	for (i = 0; i < nr_args; i++) {
		int start, end;

		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}

		if (reg.resv) {
			ret = -EINVAL;
			break;
		}

		if (reg.offset == -1U) {
			start = 0;
			end = IO_RINGFD_REG_MAX;
		} else {
			if (reg.offset >= IO_RINGFD_REG_MAX) {
				ret = -EINVAL;
				break;
			}
			start = reg.offset;
			end = start + 1;
		}

		ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
		if (ret < 0)
			break;

		reg.offset = ret;
		if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
			ret = -EFAULT;
			break;
		}
	}

	return i ? i : ret;
}

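/*
 * Unregister ring fds previously registered with io_ringfd_register().
 * Returns the number of entries processed, or < 0 on error if none were.
 */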
int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
			 unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_task *tctx = current->io_uring;
	struct io_uring_rsrc_update reg;
	int ret = 0, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;
	if (!tctx)
		return 0;

	for (i = 0; i < nr_args; i++) {
		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}
		if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
			ret = -EINVAL;
			break;
		}

		reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[reg.offset]) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
		}
	}

	return i ? i : ret;
}