io_uring/rsrc: refactor io_rsrc_ref_quiesce
io_uring/rsrc.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

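/*
 * Drop one reference from @rsrc_data; returns true once the last
 * reference has been put.
 */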
static inline bool io_put_rsrc_data_ref(struct io_rsrc_data *rsrc_data)
{
	return !--rsrc_data->refs;
}

int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

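/*
 * Copy the iovec at @index from userspace into @dst, using the compat
 * layout when the ring was set up by a compat task.
 */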
static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != ctx->dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}

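/* Post the item's CQE tag, if one was set, then do the type specific put */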
static void io_rsrc_put_work_one(struct io_rsrc_data *rsrc_data,
				 struct io_rsrc_put *prsrc)
{
	struct io_ring_ctx *ctx = rsrc_data->ctx;

	if (prsrc->tag)
		io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
	rsrc_data->do_put(ctx, prsrc);
}

static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
{
	struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
	struct io_rsrc_put *prsrc, *tmp;

	if (ref_node->inline_items)
		io_rsrc_put_work_one(rsrc_data, &ref_node->item);

	list_for_each_entry_safe(prsrc, tmp, &ref_node->item_list, list) {
		list_del(&prsrc->list);
		io_rsrc_put_work_one(rsrc_data, prsrc);
		kfree(prsrc);
	}

	io_rsrc_node_destroy(rsrc_data->ctx, ref_node);
	if (io_put_rsrc_data_ref(rsrc_data))
		complete(&rsrc_data->done);
}

void io_wait_rsrc_data(struct io_rsrc_data *data)
{
	if (data && !io_put_rsrc_data_ref(data))
		wait_for_completion(&data->done);
}

void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
		kfree(node);
}

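/*
 * A node's references hit zero: flush completed nodes from the head of
 * ->rsrc_ref_list so that resource puts are processed in the order the
 * nodes were retired.
 */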
void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
	__must_hold(&node->rsrc_data->ctx->uring_lock)
{
	struct io_ring_ctx *ctx = node->rsrc_data->ctx;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (node->refs)
			break;
		list_del(&node->node);
		__io_rsrc_put_work(node);
	}
}

struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *ref_node;
	struct io_cache_entry *entry;

	entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
	if (entry) {
		ref_node = container_of(entry, struct io_rsrc_node, cache);
	} else {
		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
		if (!ref_node)
			return NULL;
	}

	ref_node->rsrc_data = NULL;
	ref_node->refs = 1;
	INIT_LIST_HEAD(&ref_node->node);
	INIT_LIST_HEAD(&ref_node->item_list);
	ref_node->inline_items = 0;
	return ref_node;
}

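/*
 * Retire the current rsrc node against @data_to_kill: take a reference on
 * the data, queue the node on ->rsrc_ref_list, drop its master reference
 * and install a freshly allocated node as ctx->rsrc_node.
 */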
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
			 struct io_rsrc_data *data_to_kill)
	__must_hold(&ctx->uring_lock)
{
	struct io_rsrc_node *node = ctx->rsrc_node;
	struct io_rsrc_node *backup = io_rsrc_node_alloc(ctx);

	if (WARN_ON_ONCE(!backup))
		return;

	data_to_kill->refs++;
	node->rsrc_data = data_to_kill;
	list_add_tail(&node->node, &ctx->rsrc_ref_list);
	/* put master ref */
	io_put_rsrc_node(ctx, node);
	ctx->rsrc_node = backup;
}

int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
{
	if (io_alloc_cache_empty(&ctx->rsrc_node_cache)) {
		struct io_rsrc_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

		if (!node)
			return -ENOMEM;
		io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache);
	}
	return 0;
}

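/*
 * Quiesce @data before unregistration: switch to a fresh rsrc node, drop
 * the initial data reference and wait, dropping ->uring_lock while doing
 * so, until all outstanding references are gone. On a signal the master
 * reference is restored (unless the count already hit zero) and the error
 * is returned so the caller can back out.
 */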
__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	int ret;

	/* As we may drop ->uring_lock, another task may have started a quiesce */
	if (data->quiesce)
		return -ENXIO;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	io_rsrc_node_switch(ctx, data);

	/* kill initial ref */
	if (io_put_rsrc_data_ref(data))
		return 0;

	data->quiesce = true;
	do {
		mutex_unlock(&ctx->uring_lock);
		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			mutex_lock(&ctx->uring_lock);
			if (!data->refs) {
				ret = 0;
			} else {
				/* restore the master reference */
				data->refs++;
			}
			break;
		}
		wait_for_completion_interruptible(&data->done);
		mutex_lock(&ctx->uring_lock);
		ret = 0;
	} while (data->refs);
	data->quiesce = false;

	return ret;
}

static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}

static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}

__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
				     rsrc_put_fn *do_put, u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = 0;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->do_put = do_put;
	data->refs = 1;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}
	init_completion(&data->done);
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}

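/*
 * Update registered file slots in place: any existing file is queued for
 * deferred removal, the new file (if fd != -1) is installed, and the rsrc
 * node is switched if anything was removed.
 */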
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	struct file *file;
	int fd, i, err = 0;
	unsigned int done;
	bool needs_switch = false;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			file = (struct file *)(file_slot->file_ptr & FFS_MASK);
			err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
			needs_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			err = io_scm_file_account(ctx, file);
			if (err) {
				fput(file);
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, data);
	return done ? done : err;
}

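/*
 * Same as the file update path, but for registered buffers: replaced
 * buffers are queued on the current rsrc node and freed once it drains.
 */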
static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	bool needs_switch = false;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		int offset = up->offset + done;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(offset, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->rsrc_node, ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = ctx->dummy_ubuf;
			needs_switch = true;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, i) = tag;
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, ctx->buf_data);
	return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;
	int err;

	lockdep_assert_held(&ctx->uring_lock);

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;
	err = io_rsrc_node_switch_start(ctx);
	if (err)
		return err;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

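/*
 * Queue @rsrc and its tag on @node for a deferred put once the node's
 * references are gone. The first item uses the node's inline slot,
 * further items are allocated and linked on ->item_list.
 */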
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
			  struct io_rsrc_node *node, void *rsrc)
{
	u64 *tag_slot = io_get_tag_slot(data, idx);
	struct io_rsrc_put *prsrc;
	bool inline_item = true;

	if (!node->inline_items) {
		prsrc = &node->item;
		node->inline_items++;
	} else {
		prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
		if (!prsrc)
			return -ENOMEM;
		inline_item = false;
	}

	prsrc->tag = *tag_slot;
	*tag_slot = 0;
	prsrc->rsrc = rsrc;
	if (!inline_item)
		list_add(&prsrc->list, &node->item_list);
	return 0;
}

void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		/* skip scm accounted files, they'll be freed by ->ring_sock */
		if (!file || io_file_need_scm(file))
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		struct sock *sock = ctx->ring_sock->sk;
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
			kfree_skb(skb);
	}
#endif
	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock; while it's not held, prevent
	 * new requests from using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

/*
 * Ensure the UNIX gc is aware of our file set, so we are certain that
 * the io_uring can be safely unregistered on process exit, even if we have
 * loops in the file referencing. We account only files that can hold other
 * files because otherwise they can't form a loop and so are not interesting
 * for GC.
 */
int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sk = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sk->sk_receive_queue;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;

	if (likely(!io_file_need_scm(file)))
		return 0;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
		__skb_unlink(skb, head);
	else
		skb = NULL;
	spin_unlock_irq(&head->lock);

	if (!skb) {
		fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
		if (!fpl)
			return -ENOMEM;

		skb = alloc_skb(0, GFP_KERNEL);
		if (!skb) {
			kfree(fpl);
			return -ENOMEM;
		}

		fpl->user = get_uid(current_user());
		fpl->max = SCM_MAX_FD;
		fpl->count = 0;

		UNIXCB(skb).fp = fpl;
		skb->sk = sk;
		skb->scm_io_uring = 1;
		skb->destructor = unix_destruct_scm;
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
	}

	fpl = UNIXCB(skb).fp;
	fpl->fp[fpl->count++] = get_file(file);
	unix_inflight(fpl->user, file);
	skb_queue_head(head, skb);
	fput(file);
#endif
	return 0;
}

static __cold void io_rsrc_file_scm_put(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
					left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#endif
}

static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	struct file *file = prsrc->file;

	if (likely(!io_file_need_scm(file)))
		fput(file);
	else
		io_rsrc_file_scm_put(ctx, file);
}

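/*
 * Register an array of file descriptors as the ring's fixed file table.
 * fd == -1 (or a NULL @arg) leaves a sparse slot, which must not carry
 * a tag.
 */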
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		ret = io_scm_file_account(ctx, file);
		if (ret) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock; while it's not held, prevent
	 * new requests from using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

/*
 * Not super efficient, but this only runs at registration time. And we do
 * cache the last compound head, so generally we'll only do a full search
 * if we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

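/*
 * Account the pinned pages of a buffer, counting each compound head only
 * once so huge pages aren't charged per tail page.
 */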
static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

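/*
 * Pin the user pages backing [ubuf, ubuf + len). All VMAs in the range
 * must map the same file (or be anonymous); file backed memory is only
 * accepted for shmem and hugetlb mappings.
 */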
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, pret, ret = -ENOMEM;

	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto done;

	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!vmas)
		goto done;

	ret = 0;
	mmap_read_lock(current->mm);
	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			      pages, vmas);
	if (pret == nr_pages) {
		struct file *file = vmas[0]->vm_file;

		/* don't support file backed memory */
		for (i = 0; i < nr_pages; i++) {
			if (vmas[i]->vm_file != file) {
				ret = -EINVAL;
				break;
			}
			if (!file)
				continue;
			if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) {
				ret = -EOPNOTSUPP;
				break;
			}
		}
		*npages = nr_pages;
	} else {
		ret = pret < 0 ? pret : -EFAULT;
	}
	mmap_read_unlock(current->mm);
	if (ret) {
		/*
		 * if we did partial map, or found file backed vmas,
		 * release any pages we did get
		 */
		if (pret > 0)
			unpin_user_pages(pages, pret);
		goto done;
	}
	ret = 0;
done:
	kvfree(vmas);
	if (ret < 0) {
		kvfree(pages);
		pages = ERR_PTR(ret);
	}
	return pages;
}

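/*
 * Pin and map a single user buffer into an io_mapped_ubuf. A buffer that
 * sits entirely within one folio (e.g. a huge page) is coalesced into a
 * single bvec entry.
 */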
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct folio *folio = NULL;

	*pimu = ctx->dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
			     &nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's backed by a huge page, try to coalesce the pages into a single bvec entry */
	if (nr_pages > 1) {
		folio = page_folio(pages[0]);
		for (i = 1; i < nr_pages; i++) {
			if (page_folio(pages[i]) != folio) {
				folio = NULL;
				break;
			}
		}
		if (folio) {
			/*
			 * The pages are bound to the folio, so this doesn't
			 * actually unpin them; it just drops all but one
			 * reference, which is usually put down by
			 * io_buffer_unmap(). Note, needs a better helper.
			 */
			unpin_user_pages(&pages[1], nr_pages - 1);
			nr_pages = 1;
		}
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;

	if (folio) {
		bvec_set_page(&imu->bvec[0], pages[0], size, off);
		goto done;
	}
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	int i, ret;
	struct iovec iov;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			ret = io_copy_iov(ctx, &iov, arg, i);
			if (ret)
				break;
			ret = io_buffer_validate(&iov);
			if (ret)
				break;
		} else {
			memset(&iov, 0, sizeof(iov));
		}

		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

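/*
 * Set up @iter over the registered buffer @imu for an I/O of @len bytes at
 * @buf_addr, after checking that the range lies inside the mapped region.
 */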
int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * Might not be the start of the buffer; set the size appropriately
	 * and advance to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or is the whole
		 * first bvec), just use iov_iter_advance(). This makes it
		 * easier since we can just skip the first segment, which may
		 * not be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			/*
			 * Note: huge page buffers consist of one large bvec
			 * entry and should always take this path. The other
			 * branch doesn't expect non-PAGE_SIZE'd chunks.
			 */
			iter->bvec = bvec;
			iter->nr_segs = bvec->bv_len;
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}