fs/io_uring.c [linux-2.6-block.git]
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
7 * the application and kernel side. When the application reads the CQ ring
8 * tail, it must use an appropriate smp_rmb() to order with the smp_wmb()
9 * the kernel uses after writing the tail. Failure to do so could cause a
 10 * delay in when the application notices that completion events are available.
11 * This isn't a fatal condition. Likewise, the application must use an
12 * appropriate smp_wmb() both before writing the SQ tail, and after writing
13 * the SQ tail. The first one orders the sqe writes with the tail write, and
14 * the latter is paired with the smp_rmb() the kernel will issue before
15 * reading the SQ tail on submission.
16 *
17 * Also see the examples in the liburing library:
18 *
19 * git://git.kernel.dk/liburing
20 *
21 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
22 * from data shared between the kernel and application. This is done both
23 * for ordering purposes, but also to ensure that once a value is loaded from
24 * data that the application could potentially modify, it remains stable.
25 *
26 * Copyright (C) 2018-2019 Jens Axboe
 27 * Copyright (c) 2018-2019 Christoph Hellwig
28 */
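/*
 * Editor's note: a minimal userspace-side sketch of the CQ-side pairing
 * described above. Illustrative only (not part of this file or the kernel
 * build); the ring-field names follow liburing-style mappings and are an
 * assumption here:
 *
 *	unsigned head = *cq_head;
 *	unsigned tail = *cq_tail;	// tail the kernel published
 *	smp_rmb();			// pairs with the kernel smp_wmb() after the tail write
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_ring_mask];
 *		handle_completion(cqe);	// hypothetical consumer
 *		head++;
 *	}
 *	*cq_head = head;		// publish the new head back to the kernel
 */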
29#include <linux/kernel.h>
30#include <linux/init.h>
31#include <linux/errno.h>
32#include <linux/syscalls.h>
33#include <linux/compat.h>
34#include <linux/refcount.h>
35#include <linux/uio.h>
36
37#include <linux/sched/signal.h>
38#include <linux/fs.h>
39#include <linux/file.h>
40#include <linux/fdtable.h>
41#include <linux/mm.h>
42#include <linux/mman.h>
43#include <linux/mmu_context.h>
44#include <linux/percpu.h>
45#include <linux/slab.h>
46#include <linux/workqueue.h>
 47#include <linux/kthread.h>
 48#include <linux/blkdev.h>
 49#include <linux/bvec.h>
50#include <linux/net.h>
51#include <net/sock.h>
52#include <net/af_unix.h>
 53#include <net/scm.h>
54#include <linux/anon_inodes.h>
55#include <linux/sched/mm.h>
56#include <linux/uaccess.h>
57#include <linux/nospec.h>
58#include <linux/sizes.h>
59#include <linux/hugetlb.h>
60
61#include <uapi/linux/io_uring.h>
62
63#include "internal.h"
64
65#define IORING_MAX_ENTRIES 4096
 66#define IORING_MAX_FIXED_FILES 1024
67
68struct io_uring {
69 u32 head ____cacheline_aligned_in_smp;
70 u32 tail ____cacheline_aligned_in_smp;
71};
72
73struct io_sq_ring {
74 struct io_uring r;
75 u32 ring_mask;
76 u32 ring_entries;
77 u32 dropped;
78 u32 flags;
79 u32 array[];
80};
81
82struct io_cq_ring {
83 struct io_uring r;
84 u32 ring_mask;
85 u32 ring_entries;
86 u32 overflow;
87 struct io_uring_cqe cqes[];
88};
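/*
 * Editor's note: both rings above are shared with userspace. A sketch of how
 * an application is expected to reach them (assumed liburing-style usage; the
 * offsets come from the io_uring_params filled in by io_uring_setup(2), not
 * from this file):
 *
 *	sq_ptr = mmap(NULL, sq_size, PROT_READ | PROT_WRITE,
 *		      MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
 *	sq_head = sq_ptr + p.sq_off.head;
 *	sq_tail = sq_ptr + p.sq_off.tail;
 *	sq_array = sq_ptr + p.sq_off.array;	// the array[] member of io_sq_ring
 *
 * The cqes[] member of io_cq_ring is reached the same way through
 * IORING_OFF_CQ_RING and p.cq_off.cqes.
 */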
89
90struct io_mapped_ubuf {
91 u64 ubuf;
92 size_t len;
93 struct bio_vec *bvec;
94 unsigned int nr_bvecs;
95};
96
97struct async_list {
98 spinlock_t lock;
99 atomic_t cnt;
100 struct list_head list;
101
102 struct file *file;
103 off_t io_end;
104 size_t io_pages;
105};
106
107struct io_ring_ctx {
108 struct {
109 struct percpu_ref refs;
110 } ____cacheline_aligned_in_smp;
111
112 struct {
113 unsigned int flags;
114 bool compat;
115 bool account_mem;
116
117 /* SQ ring */
118 struct io_sq_ring *sq_ring;
119 unsigned cached_sq_head;
120 unsigned sq_entries;
121 unsigned sq_mask;
 122 unsigned sq_thread_idle;
123 struct io_uring_sqe *sq_sqes;
124 } ____cacheline_aligned_in_smp;
125
126 /* IO offload */
127 struct workqueue_struct *sqo_wq;
 128 struct task_struct *sqo_thread; /* if using sq thread polling */
 129 struct mm_struct *sqo_mm;
130 wait_queue_head_t sqo_wait;
131 unsigned sqo_stop;
132
133 struct {
134 /* CQ ring */
135 struct io_cq_ring *cq_ring;
136 unsigned cached_cq_tail;
137 unsigned cq_entries;
138 unsigned cq_mask;
139 struct wait_queue_head cq_wait;
140 struct fasync_struct *cq_fasync;
141 } ____cacheline_aligned_in_smp;
142
143 /*
144 * If used, fixed file set. Writers must ensure that ->refs is dead,
145 * readers must ensure that ->refs is alive as long as the file* is
146 * used. Only updated through io_uring_register(2).
147 */
148 struct file **user_files;
149 unsigned nr_user_files;
150
151 /* if used, fixed mapped user buffers */
152 unsigned nr_user_bufs;
153 struct io_mapped_ubuf *user_bufs;
154
155 struct user_struct *user;
156
157 struct completion ctx_done;
158
159 struct {
160 struct mutex uring_lock;
161 wait_queue_head_t wait;
162 } ____cacheline_aligned_in_smp;
163
164 struct {
165 spinlock_t completion_lock;
166 bool poll_multi_file;
167 /*
168 * ->poll_list is protected by the ctx->uring_lock for
169 * io_uring instances that don't use IORING_SETUP_SQPOLL.
170 * For SQPOLL, only the single threaded io_sq_thread() will
171 * manipulate the list, hence no extra locking is needed there.
172 */
173 struct list_head poll_list;
 174 struct list_head cancel_list;
175 } ____cacheline_aligned_in_smp;
176
177 struct async_list pending_async[2];
178
179#if defined(CONFIG_UNIX)
180 struct socket *ring_sock;
181#endif
182};
183
184struct sqe_submit {
185 const struct io_uring_sqe *sqe;
186 unsigned short index;
187 bool has_user;
 188 bool needs_lock;
 189 bool needs_fixed_file;
190};
191
192/*
193 * First field must be the file pointer in all the
194 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
195 */
196struct io_poll_iocb {
197 struct file *file;
198 struct wait_queue_head *head;
199 __poll_t events;
 200 bool done;
201 bool canceled;
202 struct wait_queue_entry wait;
203};
204
205/*
206 * NOTE! Each of the iocb union members has the file pointer
207 * as the first entry in their struct definition. So you can
208 * access the file pointer through any of the sub-structs,
209 * or directly as just 'ki_filp' in this struct.
210 */
 211struct io_kiocb {
 212 union {
 213 struct file *file;
214 struct kiocb rw;
215 struct io_poll_iocb poll;
216 };
217
218 struct sqe_submit submit;
219
220 struct io_ring_ctx *ctx;
221 struct list_head list;
222 unsigned int flags;
 223 refcount_t refs;
 224#define REQ_F_FORCE_NONBLOCK 1 /* inline submission attempt */
 225#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
 226#define REQ_F_FIXED_FILE 4 /* ctx owns file */
 227#define REQ_F_SEQ_PREV 8 /* sequential with previous */
 228#define REQ_F_PREPPED 16 /* prep already done */
 229 u64 user_data;
 230 u64 error;
231
232 struct work_struct work;
233};
234
235#define IO_PLUG_THRESHOLD 2
 236#define IO_IOPOLL_BATCH 8
 237
238struct io_submit_state {
239 struct blk_plug plug;
240
241 /*
242 * io_kiocb alloc cache
243 */
244 void *reqs[IO_IOPOLL_BATCH];
245 unsigned int free_reqs;
246 unsigned int cur_req;
247
248 /*
249 * File reference cache
250 */
251 struct file *file;
252 unsigned int fd;
253 unsigned int has_refs;
254 unsigned int used_refs;
255 unsigned int ios_left;
256};
257
258static struct kmem_cache *req_cachep;
259
260static const struct file_operations io_uring_fops;
261
262struct sock *io_uring_get_socket(struct file *file)
263{
264#if defined(CONFIG_UNIX)
265 if (file->f_op == &io_uring_fops) {
266 struct io_ring_ctx *ctx = file->private_data;
267
268 return ctx->ring_sock->sk;
269 }
270#endif
271 return NULL;
272}
273EXPORT_SYMBOL(io_uring_get_socket);
274
275static void io_ring_ctx_ref_free(struct percpu_ref *ref)
276{
277 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
278
279 complete(&ctx->ctx_done);
280}
281
282static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
283{
284 struct io_ring_ctx *ctx;
 285 int i;
286
287 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
288 if (!ctx)
289 return NULL;
290
291 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, 0, GFP_KERNEL)) {
292 kfree(ctx);
293 return NULL;
294 }
295
296 ctx->flags = p->flags;
297 init_waitqueue_head(&ctx->cq_wait);
298 init_completion(&ctx->ctx_done);
299 mutex_init(&ctx->uring_lock);
300 init_waitqueue_head(&ctx->wait);
301 for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) {
302 spin_lock_init(&ctx->pending_async[i].lock);
303 INIT_LIST_HEAD(&ctx->pending_async[i].list);
304 atomic_set(&ctx->pending_async[i].cnt, 0);
305 }
 306 spin_lock_init(&ctx->completion_lock);
 307 INIT_LIST_HEAD(&ctx->poll_list);
 308 INIT_LIST_HEAD(&ctx->cancel_list);
309 return ctx;
310}
311
312static void io_commit_cqring(struct io_ring_ctx *ctx)
313{
314 struct io_cq_ring *ring = ctx->cq_ring;
315
316 if (ctx->cached_cq_tail != READ_ONCE(ring->r.tail)) {
317 /* order cqe stores with ring update */
318 smp_store_release(&ring->r.tail, ctx->cached_cq_tail);
319
320 /*
 321 * Write side barrier of tail update, app has read side. See
322 * comment at the top of this file.
323 */
324 smp_wmb();
325
326 if (wq_has_sleeper(&ctx->cq_wait)) {
327 wake_up_interruptible(&ctx->cq_wait);
328 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
329 }
330 }
331}
332
333static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
334{
335 struct io_cq_ring *ring = ctx->cq_ring;
336 unsigned tail;
337
338 tail = ctx->cached_cq_tail;
339 /* See comment at the top of the file */
340 smp_rmb();
341 if (tail + 1 == READ_ONCE(ring->r.head))
342 return NULL;
343
344 ctx->cached_cq_tail++;
345 return &ring->cqes[tail & ctx->cq_mask];
346}
347
348static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
349 long res, unsigned ev_flags)
350{
351 struct io_uring_cqe *cqe;
352
353 /*
354 * If we can't get a cq entry, userspace overflowed the
355 * submission (by quite a lot). Increment the overflow count in
356 * the ring.
357 */
358 cqe = io_get_cqring(ctx);
359 if (cqe) {
360 WRITE_ONCE(cqe->user_data, ki_user_data);
361 WRITE_ONCE(cqe->res, res);
362 WRITE_ONCE(cqe->flags, ev_flags);
363 } else {
364 unsigned overflow = READ_ONCE(ctx->cq_ring->overflow);
365
366 WRITE_ONCE(ctx->cq_ring->overflow, overflow + 1);
367 }
368}
369
370static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
371{
372 if (waitqueue_active(&ctx->wait))
373 wake_up(&ctx->wait);
374 if (waitqueue_active(&ctx->sqo_wait))
375 wake_up(&ctx->sqo_wait);
376}
377
378static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
379 long res, unsigned ev_flags)
380{
381 unsigned long flags;
382
383 spin_lock_irqsave(&ctx->completion_lock, flags);
 384 io_cqring_fill_event(ctx, user_data, res, ev_flags);
385 io_commit_cqring(ctx);
386 spin_unlock_irqrestore(&ctx->completion_lock, flags);
387
 388 io_cqring_ev_posted(ctx);
389}
390
391static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
392{
393 percpu_ref_put_many(&ctx->refs, refs);
394
395 if (waitqueue_active(&ctx->wait))
396 wake_up(&ctx->wait);
397}
398
399static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
400 struct io_submit_state *state)
 401{
 402 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
403 struct io_kiocb *req;
404
405 if (!percpu_ref_tryget(&ctx->refs))
406 return NULL;
407
 408 if (!state) {
 409 req = kmem_cache_alloc(req_cachep, gfp);
410 if (unlikely(!req))
411 goto out;
412 } else if (!state->free_reqs) {
413 size_t sz;
414 int ret;
415
416 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
417 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
418
419 /*
420 * Bulk alloc is all-or-nothing. If we fail to get a batch,
421 * retry single alloc to be on the safe side.
422 */
423 if (unlikely(ret <= 0)) {
424 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
425 if (!state->reqs[0])
426 goto out;
427 ret = 1;
428 }
429 state->free_reqs = ret - 1;
430 state->cur_req = 1;
431 req = state->reqs[0];
432 } else {
433 req = state->reqs[state->cur_req];
434 state->free_reqs--;
435 state->cur_req++;
436 }
437
438 req->ctx = ctx;
439 req->flags = 0;
440 /* one is dropped after submission, the other at completion */
441 refcount_set(&req->refs, 2);
442 return req;
443out:
444 io_ring_drop_ctx_refs(ctx, 1);
445 return NULL;
446}
447
448static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
449{
450 if (*nr) {
451 kmem_cache_free_bulk(req_cachep, *nr, reqs);
452 io_ring_drop_ctx_refs(ctx, *nr);
453 *nr = 0;
454 }
455}
456
457static void io_free_req(struct io_kiocb *req)
458{
459 if (req->file && !(req->flags & REQ_F_FIXED_FILE))
460 fput(req->file);
461 io_ring_drop_ctx_refs(req->ctx, 1);
462 kmem_cache_free(req_cachep, req);
463}
464
465static void io_put_req(struct io_kiocb *req)
466{
467 if (refcount_dec_and_test(&req->refs))
468 io_free_req(req);
469}
470
471/*
472 * Find and free completed poll iocbs
473 */
474static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
475 struct list_head *done)
476{
477 void *reqs[IO_IOPOLL_BATCH];
478 struct io_kiocb *req;
 479 int to_free;
 480
 481 to_free = 0;
482 while (!list_empty(done)) {
483 req = list_first_entry(done, struct io_kiocb, list);
484 list_del(&req->list);
485
486 io_cqring_fill_event(ctx, req->user_data, req->error, 0);
487 (*nr_events)++;
488
489 if (refcount_dec_and_test(&req->refs)) {
490 /* If we're not using fixed files, we have to pair the
491 * completion part with the file put. Use regular
492 * completions for those, only batch free for fixed
493 * file.
494 */
495 if (req->flags & REQ_F_FIXED_FILE) {
496 reqs[to_free++] = req;
497 if (to_free == ARRAY_SIZE(reqs))
498 io_free_req_many(ctx, reqs, &to_free);
 499 } else {
 500 io_free_req(req);
 501 }
 502 }
 503 }
 504
 505 io_commit_cqring(ctx);
506 io_free_req_many(ctx, reqs, &to_free);
507}
508
509static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
510 long min)
511{
512 struct io_kiocb *req, *tmp;
513 LIST_HEAD(done);
514 bool spin;
515 int ret;
516
517 /*
518 * Only spin for completions if we don't have multiple devices hanging
519 * off our complete list, and we're under the requested amount.
520 */
521 spin = !ctx->poll_multi_file && *nr_events < min;
522
523 ret = 0;
524 list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
525 struct kiocb *kiocb = &req->rw;
526
527 /*
528 * Move completed entries to our local list. If we find a
529 * request that requires polling, break out and complete
530 * the done list first, if we have entries there.
531 */
532 if (req->flags & REQ_F_IOPOLL_COMPLETED) {
533 list_move_tail(&req->list, &done);
534 continue;
535 }
536 if (!list_empty(&done))
537 break;
538
539 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
540 if (ret < 0)
541 break;
542
543 if (ret && spin)
544 spin = false;
545 ret = 0;
546 }
547
548 if (!list_empty(&done))
549 io_iopoll_complete(ctx, nr_events, &done);
550
551 return ret;
552}
553
554/*
 555 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
556 * non-spinning poll check - we'll still enter the driver poll loop, but only
557 * as a non-spinning completion check.
558 */
559static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
560 long min)
561{
562 while (!list_empty(&ctx->poll_list)) {
563 int ret;
564
565 ret = io_do_iopoll(ctx, nr_events, min);
566 if (ret < 0)
567 return ret;
568 if (!min || *nr_events >= min)
569 return 0;
570 }
571
572 return 1;
573}
574
575/*
576 * We can't just wait for polled events to come to us, we have to actively
577 * find and complete them.
578 */
579static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
580{
581 if (!(ctx->flags & IORING_SETUP_IOPOLL))
582 return;
583
584 mutex_lock(&ctx->uring_lock);
585 while (!list_empty(&ctx->poll_list)) {
586 unsigned int nr_events = 0;
587
588 io_iopoll_getevents(ctx, &nr_events, 1);
589 }
590 mutex_unlock(&ctx->uring_lock);
591}
592
593static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
594 long min)
595{
596 int ret = 0;
597
598 do {
599 int tmin = 0;
600
601 if (*nr_events < min)
602 tmin = min - *nr_events;
603
604 ret = io_iopoll_getevents(ctx, nr_events, tmin);
605 if (ret <= 0)
606 break;
607 ret = 0;
608 } while (min && !*nr_events && !need_resched());
609
610 return ret;
611}
612
613static void kiocb_end_write(struct kiocb *kiocb)
614{
615 if (kiocb->ki_flags & IOCB_WRITE) {
616 struct inode *inode = file_inode(kiocb->ki_filp);
617
618 /*
619 * Tell lockdep we inherited freeze protection from submission
620 * thread.
621 */
622 if (S_ISREG(inode->i_mode))
623 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
624 file_end_write(kiocb->ki_filp);
625 }
626}
627
628static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
629{
630 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
631
632 kiocb_end_write(kiocb);
633
 634 io_cqring_add_event(req->ctx, req->user_data, res, 0);
 635 io_put_req(req);
636}
637
638static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
639{
640 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
641
642 kiocb_end_write(kiocb);
643
644 req->error = res;
645 if (res != -EAGAIN)
646 req->flags |= REQ_F_IOPOLL_COMPLETED;
647}
648
649/*
650 * After the iocb has been issued, it's safe to be found on the poll list.
651 * Adding the kiocb to the list AFTER submission ensures that we don't
652 * find it from a io_iopoll_getevents() thread before the issuer is done
653 * accessing the kiocb cookie.
654 */
655static void io_iopoll_req_issued(struct io_kiocb *req)
656{
657 struct io_ring_ctx *ctx = req->ctx;
658
659 /*
660 * Track whether we have multiple files in our lists. This will impact
661 * how we do polling eventually, not spinning if we're on potentially
662 * different devices.
663 */
664 if (list_empty(&ctx->poll_list)) {
665 ctx->poll_multi_file = false;
666 } else if (!ctx->poll_multi_file) {
667 struct io_kiocb *list_req;
668
669 list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
670 list);
671 if (list_req->rw.ki_filp != req->rw.ki_filp)
672 ctx->poll_multi_file = true;
673 }
674
675 /*
676 * For fast devices, IO may have already completed. If it has, add
677 * it to the front so we find it first.
678 */
679 if (req->flags & REQ_F_IOPOLL_COMPLETED)
680 list_add(&req->list, &ctx->poll_list);
681 else
682 list_add_tail(&req->list, &ctx->poll_list);
683}
684
685static void io_file_put(struct io_submit_state *state, struct file *file)
686{
687 if (!state) {
688 fput(file);
689 } else if (state->file) {
690 int diff = state->has_refs - state->used_refs;
691
692 if (diff)
693 fput_many(state->file, diff);
694 state->file = NULL;
695 }
696}
697
698/*
699 * Get as many references to a file as we have IOs left in this submission,
700 * assuming most submissions are for one file, or at least that each file
701 * has more than one submission.
702 */
703static struct file *io_file_get(struct io_submit_state *state, int fd)
704{
705 if (!state)
706 return fget(fd);
707
708 if (state->file) {
709 if (state->fd == fd) {
710 state->used_refs++;
711 state->ios_left--;
712 return state->file;
713 }
714 io_file_put(state, NULL);
715 }
716 state->file = fget_many(fd, state->ios_left);
717 if (!state->file)
718 return NULL;
719
720 state->fd = fd;
721 state->has_refs = state->ios_left;
722 state->used_refs = 1;
723 state->ios_left--;
724 return state->file;
725}
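/*
 * Editor's note (illustrative, with assumed numbers): for a submission batch
 * of 8 reads against a single fd, the first io_file_get() call does one
 * fget_many(fd, 8), setting has_refs = 8 and used_refs = 1; the next seven
 * calls just bump used_refs and reuse state->file. Whatever is left unused is
 * returned in one go by io_file_put() via fput_many(file, has_refs -
 * used_refs) when the fd changes or the batch ends.
 */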
726
727/*
728 * If we tracked the file through the SCM inflight mechanism, we could support
729 * any file. For now, just ensure that anything potentially problematic is done
730 * inline.
731 */
732static bool io_file_supports_async(struct file *file)
733{
734 umode_t mode = file_inode(file)->i_mode;
735
736 if (S_ISBLK(mode) || S_ISCHR(mode))
737 return true;
738 if (S_ISREG(mode) && file->f_op != &io_uring_fops)
739 return true;
740
741 return false;
742}
743
 744static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 745 bool force_nonblock, struct io_submit_state *state)
 746{
 747 const struct io_uring_sqe *sqe = s->sqe;
 748 struct io_ring_ctx *ctx = req->ctx;
 749 struct kiocb *kiocb = &req->rw;
750 unsigned ioprio;
751 int ret;
 752
753 if (!req->file)
754 return -EBADF;
 755 /* For -EAGAIN retry, everything is already prepped */
 756 if (req->flags & REQ_F_PREPPED)
757 return 0;
758
759 if (force_nonblock && !io_file_supports_async(req->file))
760 force_nonblock = false;
 761
762 kiocb->ki_pos = READ_ONCE(sqe->off);
763 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
764 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
765
766 ioprio = READ_ONCE(sqe->ioprio);
767 if (ioprio) {
768 ret = ioprio_check_cap(ioprio);
769 if (ret)
 770 return ret;
771
772 kiocb->ki_ioprio = ioprio;
773 } else
774 kiocb->ki_ioprio = get_current_ioprio();
775
776 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
777 if (unlikely(ret))
 778 return ret;
779 if (force_nonblock) {
780 kiocb->ki_flags |= IOCB_NOWAIT;
781 req->flags |= REQ_F_FORCE_NONBLOCK;
782 }
 783 if (ctx->flags & IORING_SETUP_IOPOLL) {
784 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
785 !kiocb->ki_filp->f_op->iopoll)
 786 return -EOPNOTSUPP;
 787
788 req->error = 0;
789 kiocb->ki_flags |= IOCB_HIPRI;
790 kiocb->ki_complete = io_complete_rw_iopoll;
791 } else {
792 if (kiocb->ki_flags & IOCB_HIPRI)
793 return -EINVAL;
794 kiocb->ki_complete = io_complete_rw;
795 }
 796 req->flags |= REQ_F_PREPPED;
 797 return 0;
798}
799
800static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
801{
802 switch (ret) {
803 case -EIOCBQUEUED:
804 break;
805 case -ERESTARTSYS:
806 case -ERESTARTNOINTR:
807 case -ERESTARTNOHAND:
808 case -ERESTART_RESTARTBLOCK:
809 /*
810 * We can't just restart the syscall, since previously
811 * submitted sqes may already be in progress. Just fail this
812 * IO with EINTR.
813 */
814 ret = -EINTR;
815 /* fall through */
816 default:
817 kiocb->ki_complete(kiocb, ret, 0);
818 }
819}
820
821static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
822 const struct io_uring_sqe *sqe,
823 struct iov_iter *iter)
824{
825 size_t len = READ_ONCE(sqe->len);
826 struct io_mapped_ubuf *imu;
827 unsigned index, buf_index;
828 size_t offset;
829 u64 buf_addr;
830
831 /* attempt to use fixed buffers without having provided iovecs */
832 if (unlikely(!ctx->user_bufs))
833 return -EFAULT;
834
835 buf_index = READ_ONCE(sqe->buf_index);
836 if (unlikely(buf_index >= ctx->nr_user_bufs))
837 return -EFAULT;
838
839 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
840 imu = &ctx->user_bufs[index];
841 buf_addr = READ_ONCE(sqe->addr);
842
843 /* overflow */
844 if (buf_addr + len < buf_addr)
845 return -EFAULT;
846 /* not inside the mapped region */
847 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
848 return -EFAULT;
849
850 /*
851 * May not be a start of buffer, set size appropriately
852 * and advance us to the beginning.
853 */
854 offset = buf_addr - imu->ubuf;
855 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
856 if (offset)
857 iov_iter_advance(iter, offset);
858
859 /* don't drop a reference to these pages */
860 iter->type |= ITER_BVEC_FLAG_NO_REF;
861 return 0;
862}
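/*
 * Editor's note: a worked example of the offset handling above, using assumed
 * values. If a buffer was registered with ubuf = 0x10000 and len = 64KB, and
 * an sqe asks for addr = 0x12000, len = 0x1000, then offset = 0x2000; the
 * iterator is set up over the registered bvecs with count = offset + len =
 * 0x3000 and then advanced by 0x2000, leaving exactly the requested 4KB
 * window starting at 0x12000.
 */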
863
864static int io_import_iovec(struct io_ring_ctx *ctx, int rw,
865 const struct sqe_submit *s, struct iovec **iovec,
866 struct iov_iter *iter)
867{
868 const struct io_uring_sqe *sqe = s->sqe;
869 void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
870 size_t sqe_len = READ_ONCE(sqe->len);
871 u8 opcode;
872
873 /*
874 * We're reading ->opcode for the second time, but the first read
875 * doesn't care whether it's _FIXED or not, so it doesn't matter
876 * whether ->opcode changes concurrently. The first read does care
877 * about whether it is a READ or a WRITE, so we don't trust this read
878 * for that purpose and instead let the caller pass in the read/write
879 * flag.
880 */
881 opcode = READ_ONCE(sqe->opcode);
882 if (opcode == IORING_OP_READ_FIXED ||
883 opcode == IORING_OP_WRITE_FIXED) {
 884 int ret = io_import_fixed(ctx, rw, sqe, iter);
885 *iovec = NULL;
886 return ret;
887 }
888
889 if (!s->has_user)
890 return -EFAULT;
891
892#ifdef CONFIG_COMPAT
893 if (ctx->compat)
894 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
895 iovec, iter);
896#endif
897
898 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
899}
900
901/*
902 * Make a note of the last file/offset/direction we punted to async
903 * context. We'll use this information to see if we can piggy back a
 904 * sequential request onto the previous one, if it still hasn't been
905 * completed by the async worker.
906 */
907static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
908{
909 struct async_list *async_list = &req->ctx->pending_async[rw];
910 struct kiocb *kiocb = &req->rw;
911 struct file *filp = kiocb->ki_filp;
912 off_t io_end = kiocb->ki_pos + len;
913
914 if (filp == async_list->file && kiocb->ki_pos == async_list->io_end) {
915 unsigned long max_pages;
916
917 /* Use 8x RA size as a decent limiter for both reads/writes */
918 max_pages = filp->f_ra.ra_pages;
919 if (!max_pages)
 920 max_pages = VM_READAHEAD_PAGES;
921 max_pages *= 8;
922
923 /* If max pages are exceeded, reset the state */
924 len >>= PAGE_SHIFT;
925 if (async_list->io_pages + len <= max_pages) {
926 req->flags |= REQ_F_SEQ_PREV;
927 async_list->io_pages += len;
928 } else {
929 io_end = 0;
930 async_list->io_pages = 0;
931 }
932 }
933
934 /* New file? Reset state. */
935 if (async_list->file != filp) {
936 async_list->io_pages = 0;
937 async_list->file = filp;
938 }
939 async_list->io_end = io_end;
940}
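/*
 * Editor's note (assumed numbers, for illustration): with a typical 128KB
 * readahead window (32 pages of 4KB), max_pages above is 8 * 32 = 256, so up
 * to 1MB of strictly sequential IO may be marked REQ_F_SEQ_PREV and chained
 * onto the already-running async worker before io_pages resets and the
 * piggy-backing stops.
 */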
941
942static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
943 bool force_nonblock, struct io_submit_state *state)
944{
945 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
946 struct kiocb *kiocb = &req->rw;
947 struct iov_iter iter;
948 struct file *file;
 949 size_t iov_count;
 950 int ret;
 951
 952 ret = io_prep_rw(req, s, force_nonblock, state);
953 if (ret)
954 return ret;
955 file = kiocb->ki_filp;
956
 957 if (unlikely(!(file->f_mode & FMODE_READ)))
 958 return -EBADF;
 959 if (unlikely(!file->f_op->read_iter))
 960 return -EINVAL;
961
962 ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
963 if (ret)
 964 return ret;
 965
966 iov_count = iov_iter_count(&iter);
967 ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
968 if (!ret) {
969 ssize_t ret2;
970
971 /* Catch -EAGAIN return for forced non-blocking submission */
972 ret2 = call_read_iter(file, kiocb, &iter);
 973 if (!force_nonblock || ret2 != -EAGAIN) {
 974 io_rw_done(kiocb, ret2);
975 } else {
976 /*
977 * If ->needs_lock is true, we're already in async
978 * context.
979 */
980 if (!s->needs_lock)
981 io_async_list_note(READ, req, iov_count);
 982 ret = -EAGAIN;
 983 }
984 }
985 kfree(iovec);
986 return ret;
987}
988
989static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
990 bool force_nonblock, struct io_submit_state *state)
991{
992 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
993 struct kiocb *kiocb = &req->rw;
994 struct iov_iter iter;
995 struct file *file;
 996 size_t iov_count;
 997 int ret;
 998
 999 ret = io_prep_rw(req, s, force_nonblock, state);
1000 if (ret)
1001 return ret;
 1002
1003 file = kiocb->ki_filp;
1004 if (unlikely(!(file->f_mode & FMODE_WRITE)))
 1005 return -EBADF;
 1006 if (unlikely(!file->f_op->write_iter))
 1007 return -EINVAL;
1008
1009 ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
1010 if (ret)
 1011 return ret;
 1012
1013 iov_count = iov_iter_count(&iter);
1014
1015 ret = -EAGAIN;
1016 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) {
1017 /* If ->needs_lock is true, we're already in async context. */
1018 if (!s->needs_lock)
1019 io_async_list_note(WRITE, req, iov_count);
1020 goto out_free;
1021 }
1022
1023 ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
 1024 if (!ret) {
1025 ssize_t ret2;
1026
1027 /*
1028 * Open-code file_start_write here to grab freeze protection,
1029 * which will be released by another thread in
1030 * io_complete_rw(). Fool lockdep by telling it the lock got
1031 * released so that it doesn't complain about the held lock when
1032 * we return to userspace.
1033 */
1034 if (S_ISREG(file_inode(file)->i_mode)) {
1035 __sb_start_write(file_inode(file)->i_sb,
1036 SB_FREEZE_WRITE, true);
1037 __sb_writers_release(file_inode(file)->i_sb,
1038 SB_FREEZE_WRITE);
1039 }
1040 kiocb->ki_flags |= IOCB_WRITE;
1041
1042 ret2 = call_write_iter(file, kiocb, &iter);
1043 if (!force_nonblock || ret2 != -EAGAIN) {
1044 io_rw_done(kiocb, ret2);
1045 } else {
1046 /*
1047 * If ->needs_lock is true, we're already in async
1048 * context.
1049 */
1050 if (!s->needs_lock)
1051 io_async_list_note(WRITE, req, iov_count);
1052 ret = -EAGAIN;
1053 }
 1054 }
 1055out_free:
 1056 kfree(iovec);
1057 return ret;
1058}
1059
1060/*
1061 * IORING_OP_NOP just posts a completion event, nothing else.
1062 */
1063static int io_nop(struct io_kiocb *req, u64 user_data)
1064{
1065 struct io_ring_ctx *ctx = req->ctx;
1066 long err = 0;
1067
1068 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1069 return -EINVAL;
1070
 1071 io_cqring_add_event(ctx, user_data, err, 0);
 1072 io_put_req(req);
1073 return 0;
1074}
1075
1076static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1077{
 1078 struct io_ring_ctx *ctx = req->ctx;
 1079
1080 if (!req->file)
1081 return -EBADF;
1082 /* Prep already done (EAGAIN retry) */
1083 if (req->flags & REQ_F_PREPPED)
1084 return 0;
1085
 1086 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
 1087 return -EINVAL;
 1088 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
1089 return -EINVAL;
1090
 1091 req->flags |= REQ_F_PREPPED;
1092 return 0;
1093}
1094
1095static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1096 bool force_nonblock)
1097{
1098 loff_t sqe_off = READ_ONCE(sqe->off);
1099 loff_t sqe_len = READ_ONCE(sqe->len);
1100 loff_t end = sqe_off + sqe_len;
1101 unsigned fsync_flags;
1102 int ret;
1103
1104 fsync_flags = READ_ONCE(sqe->fsync_flags);
1105 if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
1106 return -EINVAL;
1107
1108 ret = io_prep_fsync(req, sqe);
1109 if (ret)
1110 return ret;
1111
1112 /* fsync always requires a blocking context */
1113 if (force_nonblock)
1114 return -EAGAIN;
1115
1116 ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
1117 end > 0 ? end : LLONG_MAX,
1118 fsync_flags & IORING_FSYNC_DATASYNC);
1119
 1120 io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
 1121 io_put_req(req);
1122 return 0;
1123}
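/*
 * Editor's note: an illustrative SQE for a ranged fdatasync through the
 * opcode above (field values are assumptions, not taken from this file):
 * opcode = IORING_OP_FSYNC, fd = <target file>, off = 0, len = 0,
 * fsync_flags = IORING_FSYNC_DATASYNC. With off and len both zero, end stays
 * 0 and the vfs_fsync_range() call above uses LLONG_MAX, i.e. the whole file
 * is synced.
 */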
1124
1125static void io_poll_remove_one(struct io_kiocb *req)
1126{
1127 struct io_poll_iocb *poll = &req->poll;
1128
1129 spin_lock(&poll->head->lock);
1130 WRITE_ONCE(poll->canceled, true);
1131 if (!list_empty(&poll->wait.entry)) {
1132 list_del_init(&poll->wait.entry);
1133 queue_work(req->ctx->sqo_wq, &req->work);
1134 }
1135 spin_unlock(&poll->head->lock);
1136
1137 list_del_init(&req->list);
1138}
1139
1140static void io_poll_remove_all(struct io_ring_ctx *ctx)
1141{
1142 struct io_kiocb *req;
1143
1144 spin_lock_irq(&ctx->completion_lock);
1145 while (!list_empty(&ctx->cancel_list)) {
1146 req = list_first_entry(&ctx->cancel_list, struct io_kiocb,list);
1147 io_poll_remove_one(req);
1148 }
1149 spin_unlock_irq(&ctx->completion_lock);
1150}
1151
1152/*
1153 * Find a running poll command that matches one specified in sqe->addr,
1154 * and remove it if found.
1155 */
1156static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1157{
1158 struct io_ring_ctx *ctx = req->ctx;
1159 struct io_kiocb *poll_req, *next;
1160 int ret = -ENOENT;
1161
1162 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1163 return -EINVAL;
1164 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
1165 sqe->poll_events)
1166 return -EINVAL;
1167
1168 spin_lock_irq(&ctx->completion_lock);
1169 list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) {
1170 if (READ_ONCE(sqe->addr) == poll_req->user_data) {
1171 io_poll_remove_one(poll_req);
1172 ret = 0;
1173 break;
1174 }
1175 }
1176 spin_unlock_irq(&ctx->completion_lock);
1177
1178 io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
 1179 io_put_req(req);
1180 return 0;
1181}
1182
1183static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
1184 __poll_t mask)
 1185 {
1186 req->poll.done = true;
1187 io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask), 0);
1188 io_commit_cqring(ctx);
1189}
1190
1191static void io_poll_complete_work(struct work_struct *work)
1192{
1193 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1194 struct io_poll_iocb *poll = &req->poll;
1195 struct poll_table_struct pt = { ._key = poll->events };
1196 struct io_ring_ctx *ctx = req->ctx;
1197 __poll_t mask = 0;
1198
1199 if (!READ_ONCE(poll->canceled))
1200 mask = vfs_poll(poll->file, &pt) & poll->events;
1201
1202 /*
1203 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1204 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
1205 * synchronize with them. In the cancellation case the list_del_init
1206 * itself is not actually needed, but harmless so we keep it in to
1207 * avoid further branches in the fast path.
1208 */
1209 spin_lock_irq(&ctx->completion_lock);
1210 if (!mask && !READ_ONCE(poll->canceled)) {
1211 add_wait_queue(poll->head, &poll->wait);
1212 spin_unlock_irq(&ctx->completion_lock);
1213 return;
1214 }
1215 list_del_init(&req->list);
 1216 io_poll_complete(ctx, req, mask);
1217 spin_unlock_irq(&ctx->completion_lock);
1218
1219 io_cqring_ev_posted(ctx);
1220 io_put_req(req);
1221}
1222
1223static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1224 void *key)
1225{
1226 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
1227 wait);
1228 struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
1229 struct io_ring_ctx *ctx = req->ctx;
1230 __poll_t mask = key_to_poll(key);
 1231 unsigned long flags;
1232
1233 /* for instances that support it check for an event match first: */
1234 if (mask && !(mask & poll->events))
1235 return 0;
 1236
 1237 list_del_init(&poll->wait.entry);
 1238
1239 if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
1240 list_del(&req->list);
1241 io_poll_complete(ctx, req, mask);
1242 spin_unlock_irqrestore(&ctx->completion_lock, flags);
 1243
1244 io_cqring_ev_posted(ctx);
1245 io_put_req(req);
1246 } else {
1247 queue_work(ctx->sqo_wq, &req->work);
1248 }
1249
1250 return 1;
1251}
1252
1253struct io_poll_table {
1254 struct poll_table_struct pt;
1255 struct io_kiocb *req;
1256 int error;
1257};
1258
1259static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1260 struct poll_table_struct *p)
1261{
1262 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
1263
1264 if (unlikely(pt->req->poll.head)) {
1265 pt->error = -EINVAL;
1266 return;
1267 }
1268
1269 pt->error = 0;
1270 pt->req->poll.head = head;
1271 add_wait_queue(head, &pt->req->poll.wait);
1272}
1273
1274static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1275{
1276 struct io_poll_iocb *poll = &req->poll;
1277 struct io_ring_ctx *ctx = req->ctx;
1278 struct io_poll_table ipt;
 1279 bool cancel = false;
1280 __poll_t mask;
1281 u16 events;
1282
1283 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1284 return -EINVAL;
1285 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1286 return -EINVAL;
1287 if (!poll->file)
1288 return -EBADF;
1289
1290 INIT_WORK(&req->work, io_poll_complete_work);
1291 events = READ_ONCE(sqe->poll_events);
1292 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
1293
 1294 poll->head = NULL;
 1295 poll->done = false;
1296 poll->canceled = false;
1297
1298 ipt.pt._qproc = io_poll_queue_proc;
1299 ipt.pt._key = poll->events;
1300 ipt.req = req;
1301 ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1302
 1303 /* initialize the list so that we can do list_empty checks */
1304 INIT_LIST_HEAD(&poll->wait.entry);
1305 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
1306
 1307 mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
1308
1309 spin_lock_irq(&ctx->completion_lock);
1310 if (likely(poll->head)) {
1311 spin_lock(&poll->head->lock);
1312 if (unlikely(list_empty(&poll->wait.entry))) {
1313 if (ipt.error)
1314 cancel = true;
1315 ipt.error = 0;
1316 mask = 0;
1317 }
1318 if (mask || ipt.error)
1319 list_del_init(&poll->wait.entry);
1320 else if (cancel)
1321 WRITE_ONCE(poll->canceled, true);
1322 else if (!poll->done) /* actually waiting for an event */
1323 list_add_tail(&req->list, &ctx->cancel_list);
1324 spin_unlock(&poll->head->lock);
1325 }
1326 if (mask) { /* no async, we'd stolen it */
1327 req->error = mangle_poll(mask);
 1328 ipt.error = 0;
 1329 io_poll_complete(ctx, req, mask);
 1330 }
1331 spin_unlock_irq(&ctx->completion_lock);
1332
1333 if (mask) {
1334 io_cqring_ev_posted(ctx);
 1335 io_put_req(req);
 1336 }
 1337 return ipt.error;
1338}
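/*
 * Editor's note: a sketch of how userspace might drive the poll opcodes above
 * (illustrative only; the SQE fields exist in the uapi header, the usage
 * around them is an assumption):
 *
 *	sqe->opcode = IORING_OP_POLL_ADD;
 *	sqe->fd = sock_fd;
 *	sqe->poll_events = POLLIN;
 *	sqe->user_data = 0xcafe;
 *	// the eventual cqe->res carries the ready mask from io_poll_complete()
 *
 * A later IORING_OP_POLL_REMOVE sqe with addr = 0xcafe would cancel it via
 * io_poll_remove() if it has not completed yet.
 */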
1339
 1340static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
1341 const struct sqe_submit *s, bool force_nonblock,
1342 struct io_submit_state *state)
 1343{
 1344 int ret, opcode;
1345
1346 if (unlikely(s->index >= ctx->sq_entries))
1347 return -EINVAL;
1348 req->user_data = READ_ONCE(s->sqe->user_data);
1349
1350 opcode = READ_ONCE(s->sqe->opcode);
1351 switch (opcode) {
1352 case IORING_OP_NOP:
1353 ret = io_nop(req, req->user_data);
1354 break;
1355 case IORING_OP_READV:
1356 if (unlikely(s->sqe->buf_index))
1357 return -EINVAL;
 1358 ret = io_read(req, s, force_nonblock, state);
1359 break;
1360 case IORING_OP_WRITEV:
1361 if (unlikely(s->sqe->buf_index))
1362 return -EINVAL;
1363 ret = io_write(req, s, force_nonblock, state);
1364 break;
1365 case IORING_OP_READ_FIXED:
1366 ret = io_read(req, s, force_nonblock, state);
1367 break;
1368 case IORING_OP_WRITE_FIXED:
 1369 ret = io_write(req, s, force_nonblock, state);
 1370 break;
1371 case IORING_OP_FSYNC:
1372 ret = io_fsync(req, s->sqe, force_nonblock);
1373 break;
1374 case IORING_OP_POLL_ADD:
1375 ret = io_poll_add(req, s->sqe);
1376 break;
1377 case IORING_OP_POLL_REMOVE:
1378 ret = io_poll_remove(req, s->sqe);
1379 break;
1380 default:
1381 ret = -EINVAL;
1382 break;
1383 }
1384
1385 if (ret)
1386 return ret;
1387
1388 if (ctx->flags & IORING_SETUP_IOPOLL) {
1389 if (req->error == -EAGAIN)
1390 return -EAGAIN;
1391
1392 /* workqueue context doesn't hold uring_lock, grab it now */
1393 if (s->needs_lock)
1394 mutex_lock(&ctx->uring_lock);
1395 io_iopoll_req_issued(req);
1396 if (s->needs_lock)
1397 mutex_unlock(&ctx->uring_lock);
1398 }
1399
1400 return 0;
1401}
1402
1403static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
1404 const struct io_uring_sqe *sqe)
1405{
1406 switch (sqe->opcode) {
1407 case IORING_OP_READV:
1408 case IORING_OP_READ_FIXED:
1409 return &ctx->pending_async[READ];
1410 case IORING_OP_WRITEV:
1411 case IORING_OP_WRITE_FIXED:
1412 return &ctx->pending_async[WRITE];
1413 default:
1414 return NULL;
1415 }
1416}
1417
1418static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
1419{
1420 u8 opcode = READ_ONCE(sqe->opcode);
1421
1422 return !(opcode == IORING_OP_READ_FIXED ||
1423 opcode == IORING_OP_WRITE_FIXED);
1424}
1425
1426static void io_sq_wq_submit_work(struct work_struct *work)
1427{
1428 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 1429 struct io_ring_ctx *ctx = req->ctx;
1430 struct mm_struct *cur_mm = NULL;
1431 struct async_list *async_list;
1432 LIST_HEAD(req_list);
 1433 mm_segment_t old_fs;
1434 int ret;
1435
1436 async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
1437restart:
1438 do {
1439 struct sqe_submit *s = &req->submit;
1440 const struct io_uring_sqe *sqe = s->sqe;
 1441
1442 /* Ensure we clear previously set forced non-block flag */
1443 req->flags &= ~REQ_F_FORCE_NONBLOCK;
1444 req->rw.ki_flags &= ~IOCB_NOWAIT;
1445
1446 ret = 0;
1447 if (io_sqe_needs_user(sqe) && !cur_mm) {
1448 if (!mmget_not_zero(ctx->sqo_mm)) {
1449 ret = -EFAULT;
1450 } else {
1451 cur_mm = ctx->sqo_mm;
1452 use_mm(cur_mm);
1453 old_fs = get_fs();
1454 set_fs(USER_DS);
1455 }
1456 }
1457
1458 if (!ret) {
1459 s->has_user = cur_mm != NULL;
1460 s->needs_lock = true;
1461 do {
1462 ret = __io_submit_sqe(ctx, req, s, false, NULL);
1463 /*
1464 * We can get EAGAIN for polled IO even though
1465 * we're forcing a sync submission from here,
1466 * since we can't wait for request slots on the
1467 * block side.
1468 */
1469 if (ret != -EAGAIN)
1470 break;
1471 cond_resched();
1472 } while (1);
1473
1474 /* drop submission reference */
1475 io_put_req(req);
1476 }
1477 if (ret) {
1478 io_cqring_add_event(ctx, sqe->user_data, ret, 0);
 1479 io_put_req(req);
1480 }
1481
1482 /* async context always use a copy of the sqe */
1483 kfree(sqe);
1484
1485 if (!async_list)
1486 break;
1487 if (!list_empty(&req_list)) {
1488 req = list_first_entry(&req_list, struct io_kiocb,
1489 list);
1490 list_del(&req->list);
1491 continue;
1492 }
1493 if (list_empty(&async_list->list))
1494 break;
1495
1496 req = NULL;
1497 spin_lock(&async_list->lock);
1498 if (list_empty(&async_list->list)) {
1499 spin_unlock(&async_list->lock);
1500 break;
1501 }
1502 list_splice_init(&async_list->list, &req_list);
1503 spin_unlock(&async_list->lock);
1504
1505 req = list_first_entry(&req_list, struct io_kiocb, list);
1506 list_del(&req->list);
1507 } while (req);
1508
1509 /*
1510 * Rare case of racing with a submitter. If we find the count has
1511 * dropped to zero AND we have pending work items, then restart
1512 * the processing. This is a tiny race window.
 1513 */
1514 if (async_list) {
1515 ret = atomic_dec_return(&async_list->cnt);
1516 while (!ret && !list_empty(&async_list->list)) {
1517 spin_lock(&async_list->lock);
1518 atomic_inc(&async_list->cnt);
1519 list_splice_init(&async_list->list, &req_list);
1520 spin_unlock(&async_list->lock);
1521
1522 if (!list_empty(&req_list)) {
1523 req = list_first_entry(&req_list,
1524 struct io_kiocb, list);
1525 list_del(&req->list);
1526 goto restart;
1527 }
1528 ret = atomic_dec_return(&async_list->cnt);
 1529 }
 1530 }
 1531
 1532 if (cur_mm) {
 1533 set_fs(old_fs);
1534 unuse_mm(cur_mm);
1535 mmput(cur_mm);
 1536 }
 1537}
 1538
1539/*
 1540 * See if we can piggy back onto previously submitted work that is still
1541 * running. We currently only allow this if the new request is sequential
1542 * to the previous one we punted.
1543 */
1544static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
1545{
1546 bool ret = false;
1547
1548 if (!list)
1549 return false;
1550 if (!(req->flags & REQ_F_SEQ_PREV))
1551 return false;
1552 if (!atomic_read(&list->cnt))
1553 return false;
1554
1555 ret = true;
1556 spin_lock(&list->lock);
1557 list_add_tail(&req->list, &list->list);
1558 if (!atomic_read(&list->cnt)) {
1559 list_del_init(&req->list);
1560 ret = false;
1561 }
1562 spin_unlock(&list->lock);
1563 return ret;
1564}
1565
1566static bool io_op_needs_file(const struct io_uring_sqe *sqe)
1567{
1568 int op = READ_ONCE(sqe->opcode);
1569
1570 switch (op) {
1571 case IORING_OP_NOP:
1572 case IORING_OP_POLL_REMOVE:
1573 return false;
1574 default:
1575 return true;
1576 }
1577}
1578
1579static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
1580 struct io_submit_state *state, struct io_kiocb *req)
1581{
1582 unsigned flags;
1583 int fd;
1584
1585 flags = READ_ONCE(s->sqe->flags);
1586 fd = READ_ONCE(s->sqe->fd);
1587
1588 if (!io_op_needs_file(s->sqe)) {
1589 req->file = NULL;
1590 return 0;
1591 }
1592
1593 if (flags & IOSQE_FIXED_FILE) {
1594 if (unlikely(!ctx->user_files ||
1595 (unsigned) fd >= ctx->nr_user_files))
1596 return -EBADF;
1597 req->file = ctx->user_files[fd];
1598 req->flags |= REQ_F_FIXED_FILE;
1599 } else {
1600 if (s->needs_fixed_file)
1601 return -EBADF;
1602 req->file = io_file_get(state, fd);
1603 if (unlikely(!req->file))
1604 return -EBADF;
1605 }
1606
1607 return 0;
1608}
1609
1610static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
1611 struct io_submit_state *state)
1612{
1613 struct io_kiocb *req;
 1614 int ret;
1615
1616 /* enforce forwards compatibility on users */
 1617 if (unlikely(s->sqe->flags & ~IOSQE_FIXED_FILE))
1618 return -EINVAL;
1619
 1620 req = io_get_req(ctx, state);
1621 if (unlikely(!req))
1622 return -EAGAIN;
1623
1624 ret = io_req_set_file(ctx, s, state, req);
1625 if (unlikely(ret))
1626 goto out;
 1627
 1628 ret = __io_submit_sqe(ctx, req, s, true, state);
1629 if (ret == -EAGAIN) {
1630 struct io_uring_sqe *sqe_copy;
1631
1632 sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
1633 if (sqe_copy) {
1634 struct async_list *list;
1635
1636 memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy));
1637 s->sqe = sqe_copy;
1638
1639 memcpy(&req->submit, s, sizeof(*s));
1640 list = io_async_list_from_sqe(ctx, s->sqe);
1641 if (!io_add_to_prev_work(list, req)) {
1642 if (list)
1643 atomic_inc(&list->cnt);
1644 INIT_WORK(&req->work, io_sq_wq_submit_work);
1645 queue_work(ctx->sqo_wq, &req->work);
1646 }
1647
1648 /*
1649 * Queued up for async execution, worker will release
1650 * submit reference when the iocb is actually
1651 * submitted.
1652 */
1653 return 0;
1654 }
1655 }
 1656
 1657out:
1658 /* drop submission reference */
1659 io_put_req(req);
1660
1661 /* and drop final reference, if we failed */
 1662 if (ret)
 1663 io_put_req(req);
1664
1665 return ret;
1666}
1667
1668/*
1669 * Batched submission is done, ensure local IO is flushed out.
1670 */
1671static void io_submit_state_end(struct io_submit_state *state)
1672{
1673 blk_finish_plug(&state->plug);
1674 io_file_put(state, NULL);
1675 if (state->free_reqs)
1676 kmem_cache_free_bulk(req_cachep, state->free_reqs,
1677 &state->reqs[state->cur_req]);
1678}
1679
1680/*
1681 * Start submission side cache.
1682 */
1683static void io_submit_state_start(struct io_submit_state *state,
1684 struct io_ring_ctx *ctx, unsigned max_ios)
1685{
1686 blk_start_plug(&state->plug);
 1687 state->free_reqs = 0;
1688 state->file = NULL;
1689 state->ios_left = max_ios;
1690}
1691
1692static void io_commit_sqring(struct io_ring_ctx *ctx)
1693{
1694 struct io_sq_ring *ring = ctx->sq_ring;
1695
1696 if (ctx->cached_sq_head != READ_ONCE(ring->r.head)) {
1697 /*
1698 * Ensure any loads from the SQEs are done at this point,
1699 * since once we write the new head, the application could
1700 * write new data to them.
1701 */
1702 smp_store_release(&ring->r.head, ctx->cached_sq_head);
1703
1704 /*
1705 * write side barrier of head update, app has read side. See
1706 * comment at the top of this file
1707 */
1708 smp_wmb();
1709 }
1710}
1711
1712/*
1713 * Undo last io_get_sqring()
1714 */
1715static void io_drop_sqring(struct io_ring_ctx *ctx)
1716{
1717 ctx->cached_sq_head--;
1718}
1719
1720/*
1721 * Fetch an sqe, if one is available. Note that s->sqe will point to memory
1722 * that is mapped by userspace. This means that care needs to be taken to
1723 * ensure that reads are stable, as we cannot rely on userspace always
1724 * being a good citizen. If members of the sqe are validated and then later
1725 * used, it's important that those reads are done through READ_ONCE() to
1726 * prevent a re-load down the line.
1727 */
1728static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
1729{
1730 struct io_sq_ring *ring = ctx->sq_ring;
1731 unsigned head;
1732
1733 /*
1734 * The cached sq head (or cq tail) serves two purposes:
1735 *
1736 * 1) allows us to batch the cost of updating the user visible
1737 * head updates.
1738 * 2) allows the kernel side to track the head on its own, even
1739 * though the application is the one updating it.
1740 */
1741 head = ctx->cached_sq_head;
1742 /* See comment at the top of this file */
1743 smp_rmb();
1744 if (head == READ_ONCE(ring->r.tail))
1745 return false;
1746
1747 head = READ_ONCE(ring->array[head & ctx->sq_mask]);
1748 if (head < ctx->sq_entries) {
1749 s->index = head;
1750 s->sqe = &ctx->sq_sqes[head];
1751 ctx->cached_sq_head++;
1752 return true;
1753 }
1754
1755 /* drop invalid entries */
1756 ctx->cached_sq_head++;
1757 ring->dropped++;
1758 /* See comment at the top of this file */
1759 smp_wmb();
1760 return false;
1761}
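/*
 * Editor's note: an illustrative example of the re-load hazard the comment
 * above warns about (MAX_LEN and do_io() are hypothetical):
 *
 *	// Broken: sqe->len is read twice; userspace can change it between
 *	// the check and the use, so the validated value is not the used one.
 *	if (sqe->len > MAX_LEN)
 *		return -EINVAL;
 *	do_io(sqe->len);
 *
 *	// Safe: read once with READ_ONCE(), validate and use the same copy.
 *	len = READ_ONCE(sqe->len);
 *	if (len > MAX_LEN)
 *		return -EINVAL;
 *	do_io(len);
 */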
1762
1763static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
1764 unsigned int nr, bool has_user, bool mm_fault)
1765{
1766 struct io_submit_state state, *statep = NULL;
1767 int ret, i, submitted = 0;
1768
1769 if (nr > IO_PLUG_THRESHOLD) {
1770 io_submit_state_start(&state, ctx, nr);
1771 statep = &state;
1772 }
1773
1774 for (i = 0; i < nr; i++) {
1775 if (unlikely(mm_fault)) {
1776 ret = -EFAULT;
1777 } else {
1778 sqes[i].has_user = has_user;
1779 sqes[i].needs_lock = true;
1780 sqes[i].needs_fixed_file = true;
1781 ret = io_submit_sqe(ctx, &sqes[i], statep);
1782 }
1783 if (!ret) {
1784 submitted++;
1785 continue;
1786 }
1787
1788 io_cqring_add_event(ctx, sqes[i].sqe->user_data, ret, 0);
1789 }
1790
1791 if (statep)
1792 io_submit_state_end(&state);
1793
1794 return submitted;
1795}
1796
1797static int io_sq_thread(void *data)
1798{
1799 struct sqe_submit sqes[IO_IOPOLL_BATCH];
1800 struct io_ring_ctx *ctx = data;
1801 struct mm_struct *cur_mm = NULL;
1802 mm_segment_t old_fs;
1803 DEFINE_WAIT(wait);
1804 unsigned inflight;
1805 unsigned long timeout;
1806
1807 old_fs = get_fs();
1808 set_fs(USER_DS);
1809
1810 timeout = inflight = 0;
1811 while (!kthread_should_stop() && !ctx->sqo_stop) {
1812 bool all_fixed, mm_fault = false;
1813 int i;
1814
1815 if (inflight) {
1816 unsigned nr_events = 0;
1817
1818 if (ctx->flags & IORING_SETUP_IOPOLL) {
1819 /*
1820 * We disallow the app entering submit/complete
1821 * with polling, but we still need to lock the
1822 * ring to prevent racing with polled issue
1823 * that got punted to a workqueue.
1824 */
1825 mutex_lock(&ctx->uring_lock);
1826 io_iopoll_check(ctx, &nr_events, 0);
1827 mutex_unlock(&ctx->uring_lock);
1828 } else {
1829 /*
1830 * Normal IO, just pretend everything completed.
1831 * We don't have to poll completions for that.
1832 */
1833 nr_events = inflight;
1834 }
1835
1836 inflight -= nr_events;
1837 if (!inflight)
1838 timeout = jiffies + ctx->sq_thread_idle;
1839 }
1840
1841 if (!io_get_sqring(ctx, &sqes[0])) {
1842 /*
1843 * We're polling. If we're within the defined idle
1844 * period, then let us spin without work before going
1845 * to sleep.
1846 */
1847 if (inflight || !time_after(jiffies, timeout)) {
1848 cpu_relax();
1849 continue;
1850 }
1851
1852 /*
1853 * Drop cur_mm before scheduling, we can't hold it for
1854 * long periods (or over schedule()). Do this before
1855 * adding ourselves to the waitqueue, as the unuse/drop
1856 * may sleep.
1857 */
1858 if (cur_mm) {
1859 unuse_mm(cur_mm);
1860 mmput(cur_mm);
1861 cur_mm = NULL;
1862 }
1863
1864 prepare_to_wait(&ctx->sqo_wait, &wait,
1865 TASK_INTERRUPTIBLE);
1866
1867 /* Tell userspace we may need a wakeup call */
1868 ctx->sq_ring->flags |= IORING_SQ_NEED_WAKEUP;
1869 smp_wmb();
1870
1871 if (!io_get_sqring(ctx, &sqes[0])) {
1872 if (kthread_should_stop()) {
1873 finish_wait(&ctx->sqo_wait, &wait);
1874 break;
1875 }
1876 if (signal_pending(current))
1877 flush_signals(current);
1878 schedule();
1879 finish_wait(&ctx->sqo_wait, &wait);
1880
1881 ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
1882 smp_wmb();
1883 continue;
1884 }
1885 finish_wait(&ctx->sqo_wait, &wait);
1886
1887 ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
1888 smp_wmb();
1889 }
1890
1891 i = 0;
1892 all_fixed = true;
1893 do {
1894 if (all_fixed && io_sqe_needs_user(sqes[i].sqe))
1895 all_fixed = false;
1896
1897 i++;
1898 if (i == ARRAY_SIZE(sqes))
1899 break;
1900 } while (io_get_sqring(ctx, &sqes[i]));
1901
1902 /* Unless all new commands are FIXED regions, grab mm */
1903 if (!all_fixed && !cur_mm) {
1904 mm_fault = !mmget_not_zero(ctx->sqo_mm);
1905 if (!mm_fault) {
1906 use_mm(ctx->sqo_mm);
1907 cur_mm = ctx->sqo_mm;
1908 }
1909 }
1910
1911 inflight += io_submit_sqes(ctx, sqes, i, cur_mm != NULL,
1912 mm_fault);
1913
1914 /* Commit SQ ring head once we've consumed all SQEs */
1915 io_commit_sqring(ctx);
1916 }
1917
1918 set_fs(old_fs);
1919 if (cur_mm) {
1920 unuse_mm(cur_mm);
1921 mmput(cur_mm);
1922 }
1923
1924 if (kthread_should_park())
1925 kthread_parkme();
1926
1927 return 0;
1928}
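/*
 * Editor's note: the userspace half of the IORING_SQ_NEED_WAKEUP handshake
 * above, as a sketch (the flag names are from the io_uring uapi; the wrapper
 * call shape is an assumption):
 *
 *	// fill sqes and advance the SQ tail as usual ...
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 *	// otherwise the sq thread is still spinning and picks them up itself
 */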
1929
1930static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
1931{
 1932 struct io_submit_state state, *statep = NULL;
 1933 int i, ret = 0, submit = 0;
 1934
1935 if (to_submit > IO_PLUG_THRESHOLD) {
1936 io_submit_state_start(&state, ctx, to_submit);
1937 statep = &state;
1938 }
1939
1940 for (i = 0; i < to_submit; i++) {
1941 struct sqe_submit s;
1942
1943 if (!io_get_sqring(ctx, &s))
1944 break;
1945
1946 s.has_user = true;
 1947 s.needs_lock = false;
 1948 s.needs_fixed_file = false;
 1949
 1950 ret = io_submit_sqe(ctx, &s, statep);
1951 if (ret) {
1952 io_drop_sqring(ctx);
1953 break;
1954 }
1955
1956 submit++;
1957 }
1958 io_commit_sqring(ctx);
1959
1960 if (statep)
1961 io_submit_state_end(statep);
1962
1963 return submit ? submit : ret;
1964}
1965
1966static unsigned io_cqring_events(struct io_cq_ring *ring)
1967{
1968 return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
1969}
1970
1971/*
1972 * Wait until events become available, if we don't already have some. The
1973 * application must reap them itself, as they reside on the shared cq ring.
1974 */
1975static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
1976 const sigset_t __user *sig, size_t sigsz)
1977{
1978 struct io_cq_ring *ring = ctx->cq_ring;
1979 sigset_t ksigmask, sigsaved;
1980 DEFINE_WAIT(wait);
1981 int ret;
1982
1983 /* See comment at the top of this file */
1984 smp_rmb();
1985 if (io_cqring_events(ring) >= min_events)
1986 return 0;
1987
1988 if (sig) {
1989#ifdef CONFIG_COMPAT
1990 if (in_compat_syscall())
1991 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
1992 &ksigmask, &sigsaved, sigsz);
1993 else
1994#endif
1995 ret = set_user_sigmask(sig, &ksigmask,
1996 &sigsaved, sigsz);
1997
1998 if (ret)
1999 return ret;
2000 }
2001
2002 do {
2003 prepare_to_wait(&ctx->wait, &wait, TASK_INTERRUPTIBLE);
2004
2005 ret = 0;
2006 /* See comment at the top of this file */
2007 smp_rmb();
2008 if (io_cqring_events(ring) >= min_events)
2009 break;
2010
2011 schedule();
2012
2013 ret = -EINTR;
2014 if (signal_pending(current))
2015 break;
2016 } while (1);
2017
2018 finish_wait(&ctx->wait, &wait);
2019
2020 if (sig)
2021 restore_user_sigmask(sig, &sigsaved);
2022
2023 return READ_ONCE(ring->r.head) == READ_ONCE(ring->r.tail) ? ret : 0;
2024}
2025
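/*
 * Drop the references we hold on the registered file set. With CONFIG_UNIX
 * the files are pinned via SCM-carrying skbs on the ring socket's receive
 * queue, so freeing those skbs releases the files; otherwise we fput() each
 * file directly.
 */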
2026static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
2027{
2028#if defined(CONFIG_UNIX)
2029 if (ctx->ring_sock) {
2030 struct sock *sock = ctx->ring_sock->sk;
2031 struct sk_buff *skb;
2032
2033 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
2034 kfree_skb(skb);
2035 }
2036#else
2037 int i;
2038
2039 for (i = 0; i < ctx->nr_user_files; i++)
2040 fput(ctx->user_files[i]);
2041#endif
2042}
2043
2044static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
2045{
2046 if (!ctx->user_files)
2047 return -ENXIO;
2048
2049 __io_sqe_files_unregister(ctx);
2050 kfree(ctx->user_files);
2051 ctx->user_files = NULL;
2052 ctx->nr_user_files = 0;
2053 return 0;
2054}
2055
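/*
 * Flag the SQPOLL thread to stop, park it so it reaches a known quiescent
 * point, then stop the kthread for good.
 */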
2056static void io_sq_thread_stop(struct io_ring_ctx *ctx)
2057{
2058 if (ctx->sqo_thread) {
2059 ctx->sqo_stop = 1;
2060 mb();
2061	kthread_park(ctx->sqo_thread);
2062 kthread_stop(ctx->sqo_thread);
2063 ctx->sqo_thread = NULL;
2064 }
2065}
2066
2067static void io_finish_async(struct io_ring_ctx *ctx)
2068{
2069 io_sq_thread_stop(ctx);
2070
2071 if (ctx->sqo_wq) {
2072 destroy_workqueue(ctx->sqo_wq);
2073 ctx->sqo_wq = NULL;
2074 }
2075}
2076
2077#if defined(CONFIG_UNIX)
2078static void io_destruct_skb(struct sk_buff *skb)
2079{
2080 struct io_ring_ctx *ctx = skb->sk->sk_user_data;
2081
2082 io_finish_async(ctx);
2083 unix_destruct_scm(skb);
2084}
2085
2086/*
2087 * Ensure the UNIX gc is aware of our file set, so we are certain that
2088 * the io_uring can be safely unregistered on process exit, even if we have
2089 * loops in the file referencing.
2090 */
2091static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
2092{
2093 struct sock *sk = ctx->ring_sock->sk;
2094 struct scm_fp_list *fpl;
2095 struct sk_buff *skb;
2096 int i;
2097
2098 if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
2099 unsigned long inflight = ctx->user->unix_inflight + nr;
2100
2101 if (inflight > task_rlimit(current, RLIMIT_NOFILE))
2102 return -EMFILE;
2103 }
2104
2105 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
2106 if (!fpl)
2107 return -ENOMEM;
2108
2109 skb = alloc_skb(0, GFP_KERNEL);
2110 if (!skb) {
2111 kfree(fpl);
2112 return -ENOMEM;
2113 }
2114
2115 skb->sk = sk;
2116 skb->destructor = io_destruct_skb;
2117
2118 fpl->user = get_uid(ctx->user);
2119 for (i = 0; i < nr; i++) {
2120 fpl->fp[i] = get_file(ctx->user_files[i + offset]);
2121 unix_inflight(fpl->user, fpl->fp[i]);
2122 }
2123
2124 fpl->max = fpl->count = nr;
2125 UNIXCB(skb).fp = fpl;
2126 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
2127 skb_queue_head(&sk->sk_receive_queue, skb);
2128
2129 for (i = 0; i < nr; i++)
2130 fput(fpl->fp[i]);
2131
2132 return 0;
2133}
2134
2135/*
2136 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
2137 * causes regular reference counting to break down. We rely on the UNIX
2138 * garbage collection to take care of this problem for us.
2139 */
2140static int io_sqe_files_scm(struct io_ring_ctx *ctx)
2141{
2142 unsigned left, total;
2143 int ret = 0;
2144
2145 total = 0;
2146 left = ctx->nr_user_files;
2147 while (left) {
2148 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
2149 int ret;
2150
2151 ret = __io_sqe_files_scm(ctx, this_files, total);
2152 if (ret)
2153 break;
2154 left -= this_files;
2155 total += this_files;
2156 }
2157
2158 if (!ret)
2159 return 0;
2160
2161 while (total < ctx->nr_user_files) {
2162 fput(ctx->user_files[total]);
2163 total++;
2164 }
2165
2166 return ret;
2167}
2168#else
2169static int io_sqe_files_scm(struct io_ring_ctx *ctx)
2170{
2171 return 0;
2172}
2173#endif
2174
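/*
 * Register a fixed file set: copy the fd array from userspace, take a
 * long-term reference on each file (rejecting io_uring fds, see below), and
 * hook the set into the UNIX socket garbage collector so the references
 * can't leak across process exit.
 */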
2175static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
2176 unsigned nr_args)
2177{
2178 __s32 __user *fds = (__s32 __user *) arg;
2179 int fd, ret = 0;
2180 unsigned i;
2181
2182 if (ctx->user_files)
2183 return -EBUSY;
2184 if (!nr_args)
2185 return -EINVAL;
2186 if (nr_args > IORING_MAX_FIXED_FILES)
2187 return -EMFILE;
2188
2189 ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL);
2190 if (!ctx->user_files)
2191 return -ENOMEM;
2192
2193 for (i = 0; i < nr_args; i++) {
2194 ret = -EFAULT;
2195 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
2196 break;
2197
2198 ctx->user_files[i] = fget(fd);
2199
2200 ret = -EBADF;
2201 if (!ctx->user_files[i])
2202 break;
2203 /*
2204 * Don't allow io_uring instances to be registered. If UNIX
2205 * isn't enabled, then this causes a reference cycle and this
2206 * instance can never get freed. If UNIX is enabled we'll
2207 * handle it just fine, but there's still no point in allowing
2208 * a ring fd as it doesn't support regular read/write anyway.
2209 */
2210 if (ctx->user_files[i]->f_op == &io_uring_fops) {
2211 fput(ctx->user_files[i]);
2212 break;
2213 }
2214 ctx->nr_user_files++;
2215 ret = 0;
2216 }
2217
2218 if (ret) {
2219 for (i = 0; i < ctx->nr_user_files; i++)
2220 fput(ctx->user_files[i]);
2221
2222 kfree(ctx->user_files);
2223	ctx->user_files = NULL;
2224 ctx->nr_user_files = 0;
2225 return ret;
2226 }
2227
2228 ret = io_sqe_files_scm(ctx);
2229 if (ret)
2230 io_sqe_files_unregister(ctx);
2231
2232 return ret;
2233}
2234
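/*
 * Set up the async offload machinery: pin the submitter's mm for later
 * kthread use, spawn the SQPOLL thread if IORING_SETUP_SQPOLL was requested
 * (optionally bound to a CPU with IORING_SETUP_SQ_AFF), and create the
 * bounded workqueue used for punting requests to async context.
 */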
2235static int io_sq_offload_start(struct io_ring_ctx *ctx,
2236 struct io_uring_params *p)
2237{
2238 int ret;
2239
2240	init_waitqueue_head(&ctx->sqo_wait);
2241 mmgrab(current->mm);
2242 ctx->sqo_mm = current->mm;
2243
2244 ret = -EINVAL;
2245 if (!cpu_possible(p->sq_thread_cpu))
2246 goto err;
2247
2248 if (ctx->flags & IORING_SETUP_SQPOLL) {
2249 ret = -EPERM;
2250 if (!capable(CAP_SYS_ADMIN))
2251 goto err;
2252
2253 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
2254 if (!ctx->sq_thread_idle)
2255 ctx->sq_thread_idle = HZ;
2256
2257 if (p->flags & IORING_SETUP_SQ_AFF) {
2258 int cpu;
2259
2260 cpu = array_index_nospec(p->sq_thread_cpu, NR_CPUS);
2261 ret = -EINVAL;
2262 if (!cpu_possible(p->sq_thread_cpu))
2263 goto err;
2264
2265 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
2266 ctx, cpu,
2267 "io_uring-sq");
2268 } else {
2269 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
2270 "io_uring-sq");
2271 }
2272 if (IS_ERR(ctx->sqo_thread)) {
2273 ret = PTR_ERR(ctx->sqo_thread);
2274 ctx->sqo_thread = NULL;
2275 goto err;
2276 }
2277 wake_up_process(ctx->sqo_thread);
2278 } else if (p->flags & IORING_SETUP_SQ_AFF) {
2279 /* Can't have SQ_AFF without SQPOLL */
2280 ret = -EINVAL;
2281 goto err;
2282 }
2283
2284	/* Do QD, or 2 * CPUS, whichever is smaller */
2285 ctx->sqo_wq = alloc_workqueue("io_ring-wq", WQ_UNBOUND | WQ_FREEZABLE,
2286 min(ctx->sq_entries - 1, 2 * num_online_cpus()));
2287 if (!ctx->sqo_wq) {
2288 ret = -ENOMEM;
2289 goto err;
2290 }
2291
2292 return 0;
2293err:
2294	io_sq_thread_stop(ctx);
2295 mmdrop(ctx->sqo_mm);
2296 ctx->sqo_mm = NULL;
2297 return ret;
2298}
2299
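/*
 * Pinned memory accounting against RLIMIT_MEMLOCK, tracked in
 * user->locked_vm. io_account_mem() charges pages with a cmpxchg loop so we
 * never overshoot the limit; io_unaccount_mem() is the inverse.
 */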
2300static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
2301{
2302 atomic_long_sub(nr_pages, &user->locked_vm);
2303}
2304
2305static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
2306{
2307 unsigned long page_limit, cur_pages, new_pages;
2308
2309 /* Don't allow more pages than we can safely lock */
2310 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
2311
2312 do {
2313 cur_pages = atomic_long_read(&user->locked_vm);
2314 new_pages = cur_pages + nr_pages;
2315 if (new_pages > page_limit)
2316 return -ENOMEM;
2317 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
2318 new_pages) != cur_pages);
2319
2320 return 0;
2321}
2322
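/*
 * Ring memory is allocated as zeroed compound pages so it can later be
 * mapped into userspace by io_uring_mmap(); freeing drops the head page
 * reference.
 */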
2323static void io_mem_free(void *ptr)
2324{
2325 struct page *page = virt_to_head_page(ptr);
2326
2327 if (put_page_testzero(page))
2328 free_compound_page(page);
2329}
2330
2331static void *io_mem_alloc(size_t size)
2332{
2333 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
2334 __GFP_NORETRY;
2335
2336 return (void *) __get_free_pages(gfp_flags, get_order(size));
2337}
2338
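/* Pages needed for the SQ ring, SQE array and CQ ring, for mem accounting */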
2339static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
2340{
2341 struct io_sq_ring *sq_ring;
2342 struct io_cq_ring *cq_ring;
2343 size_t bytes;
2344
2345 bytes = struct_size(sq_ring, array, sq_entries);
2346 bytes += array_size(sizeof(struct io_uring_sqe), sq_entries);
2347 bytes += struct_size(cq_ring, cqes, cq_entries);
2348
2349 return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
2350}
2351
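/*
 * Tear down all registered buffers: drop the page references taken at
 * registration time, unaccount the pinned memory and free the bvec tables.
 */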
2352static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
2353{
2354 int i, j;
2355
2356 if (!ctx->user_bufs)
2357 return -ENXIO;
2358
2359 for (i = 0; i < ctx->nr_user_bufs; i++) {
2360 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
2361
2362 for (j = 0; j < imu->nr_bvecs; j++)
2363 put_page(imu->bvec[j].bv_page);
2364
2365 if (ctx->account_mem)
2366 io_unaccount_mem(ctx->user, imu->nr_bvecs);
2367 kfree(imu->bvec);
2368 imu->nr_bvecs = 0;
2369 }
2370
2371 kfree(ctx->user_bufs);
2372 ctx->user_bufs = NULL;
2373 ctx->nr_user_bufs = 0;
2374 return 0;
2375}
2376
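/* Copy the index'th iovec from userspace, handling compat tasks */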
2377static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
2378 void __user *arg, unsigned index)
2379{
2380 struct iovec __user *src;
2381
2382#ifdef CONFIG_COMPAT
2383 if (ctx->compat) {
2384 struct compat_iovec __user *ciovs;
2385 struct compat_iovec ciov;
2386
2387 ciovs = (struct compat_iovec __user *) arg;
2388 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
2389 return -EFAULT;
2390
2391 dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
2392 dst->iov_len = ciov.iov_len;
2393 return 0;
2394 }
2395#endif
2396 src = (struct iovec __user *) arg;
2397 if (copy_from_user(dst, &src[index], sizeof(*dst)))
2398 return -EFAULT;
2399 return 0;
2400}
2401
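/*
 * Register fixed buffers: for each iovec, pin the backing pages with
 * get_user_pages_longterm(), refuse file-backed mappings (except hugetlb),
 * charge the pages against RLIMIT_MEMLOCK and describe the buffer with a
 * bio_vec table for later READ_FIXED/WRITE_FIXED use.
 */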
2402static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
2403 unsigned nr_args)
2404{
2405 struct vm_area_struct **vmas = NULL;
2406 struct page **pages = NULL;
2407 int i, j, got_pages = 0;
2408 int ret = -EINVAL;
2409
2410 if (ctx->user_bufs)
2411 return -EBUSY;
2412 if (!nr_args || nr_args > UIO_MAXIOV)
2413 return -EINVAL;
2414
2415 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
2416 GFP_KERNEL);
2417 if (!ctx->user_bufs)
2418 return -ENOMEM;
2419
2420 for (i = 0; i < nr_args; i++) {
2421 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
2422 unsigned long off, start, end, ubuf;
2423 int pret, nr_pages;
2424 struct iovec iov;
2425 size_t size;
2426
2427 ret = io_copy_iov(ctx, &iov, arg, i);
2428 if (ret)
2429 break;
2430
2431 /*
2432 * Don't impose further limits on the size and buffer
2433	 * constraints here; we'll return -EINVAL later when IO
2434	 * is submitted if they are wrong.
2435 */
2436 ret = -EFAULT;
2437 if (!iov.iov_base || !iov.iov_len)
2438 goto err;
2439
2440 /* arbitrary limit, but we need something */
2441 if (iov.iov_len > SZ_1G)
2442 goto err;
2443
2444 ubuf = (unsigned long) iov.iov_base;
2445 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2446 start = ubuf >> PAGE_SHIFT;
2447 nr_pages = end - start;
2448
2449 if (ctx->account_mem) {
2450 ret = io_account_mem(ctx->user, nr_pages);
2451 if (ret)
2452 goto err;
2453 }
2454
2455 ret = 0;
2456 if (!pages || nr_pages > got_pages) {
2457 kfree(vmas);
2458 kfree(pages);
2459 pages = kmalloc_array(nr_pages, sizeof(struct page *),
2460 GFP_KERNEL);
2461 vmas = kmalloc_array(nr_pages,
2462 sizeof(struct vm_area_struct *),
2463 GFP_KERNEL);
2464 if (!pages || !vmas) {
2465 ret = -ENOMEM;
2466 if (ctx->account_mem)
2467 io_unaccount_mem(ctx->user, nr_pages);
2468 goto err;
2469 }
2470 got_pages = nr_pages;
2471 }
2472
2473 imu->bvec = kmalloc_array(nr_pages, sizeof(struct bio_vec),
2474 GFP_KERNEL);
2475 ret = -ENOMEM;
2476 if (!imu->bvec) {
2477 if (ctx->account_mem)
2478 io_unaccount_mem(ctx->user, nr_pages);
2479 goto err;
2480 }
2481
2482 ret = 0;
2483 down_read(&current->mm->mmap_sem);
2484 pret = get_user_pages_longterm(ubuf, nr_pages, FOLL_WRITE,
2485 pages, vmas);
2486 if (pret == nr_pages) {
2487 /* don't support file backed memory */
2488 for (j = 0; j < nr_pages; j++) {
2489 struct vm_area_struct *vma = vmas[j];
2490
2491 if (vma->vm_file &&
2492 !is_file_hugepages(vma->vm_file)) {
2493 ret = -EOPNOTSUPP;
2494 break;
2495 }
2496 }
2497 } else {
2498 ret = pret < 0 ? pret : -EFAULT;
2499 }
2500 up_read(&current->mm->mmap_sem);
2501 if (ret) {
2502 /*
2503	 * If we did a partial map, or found file-backed vmas,
2504	 * release any pages we did get.
2505 */
2506 if (pret > 0) {
2507 for (j = 0; j < pret; j++)
2508 put_page(pages[j]);
2509 }
2510 if (ctx->account_mem)
2511 io_unaccount_mem(ctx->user, nr_pages);
2512 goto err;
2513 }
2514
2515 off = ubuf & ~PAGE_MASK;
2516 size = iov.iov_len;
2517 for (j = 0; j < nr_pages; j++) {
2518 size_t vec_len;
2519
2520 vec_len = min_t(size_t, size, PAGE_SIZE - off);
2521 imu->bvec[j].bv_page = pages[j];
2522 imu->bvec[j].bv_len = vec_len;
2523 imu->bvec[j].bv_offset = off;
2524 off = 0;
2525 size -= vec_len;
2526 }
2527 /* store original address for later verification */
2528 imu->ubuf = ubuf;
2529 imu->len = iov.iov_len;
2530 imu->nr_bvecs = nr_pages;
2531
2532 ctx->nr_user_bufs++;
2533 }
2534 kfree(pages);
2535 kfree(vmas);
2536 return 0;
2537err:
2538 kfree(pages);
2539 kfree(vmas);
2540 io_sqe_buffer_unregister(ctx);
2541 return ret;
2542}
2543
2544static void io_ring_ctx_free(struct io_ring_ctx *ctx)
2545{
2546	io_finish_async(ctx);
2547 if (ctx->sqo_mm)
2548 mmdrop(ctx->sqo_mm);
2549
2550 io_iopoll_reap_events(ctx);
2551	io_sqe_buffer_unregister(ctx);
2552	io_sqe_files_unregister(ctx);
2553
2554#if defined(CONFIG_UNIX)
2555 if (ctx->ring_sock)
2556 sock_release(ctx->ring_sock);
2557#endif
2558
2559 io_mem_free(ctx->sq_ring);
2560 io_mem_free(ctx->sq_sqes);
2561 io_mem_free(ctx->cq_ring);
2562
2563 percpu_ref_exit(&ctx->refs);
2564 if (ctx->account_mem)
2565 io_unaccount_mem(ctx->user,
2566 ring_pages(ctx->sq_entries, ctx->cq_entries));
2567 free_uid(ctx->user);
2568 kfree(ctx);
2569}
2570
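/*
 * poll() support: the ring fd is writable while there's room to post new
 * SQEs, and readable when unconsumed CQEs are pending.
 */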
2571static __poll_t io_uring_poll(struct file *file, poll_table *wait)
2572{
2573 struct io_ring_ctx *ctx = file->private_data;
2574 __poll_t mask = 0;
2575
2576 poll_wait(file, &ctx->cq_wait, wait);
2577 /* See comment at the top of this file */
2578 smp_rmb();
2579 if (READ_ONCE(ctx->sq_ring->r.tail) + 1 != ctx->cached_sq_head)
2580 mask |= EPOLLOUT | EPOLLWRNORM;
2581 if (READ_ONCE(ctx->cq_ring->r.head) != ctx->cached_cq_tail)
2582 mask |= EPOLLIN | EPOLLRDNORM;
2583
2584 return mask;
2585}
2586
2587static int io_uring_fasync(int fd, struct file *file, int on)
2588{
2589 struct io_ring_ctx *ctx = file->private_data;
2590
2591 return fasync_helper(fd, file, on, &ctx->cq_fasync);
2592}
2593
2594static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
2595{
2596 mutex_lock(&ctx->uring_lock);
2597 percpu_ref_kill(&ctx->refs);
2598 mutex_unlock(&ctx->uring_lock);
2599
2600	io_poll_remove_all(ctx);
2601	io_iopoll_reap_events(ctx);
2602 wait_for_completion(&ctx->ctx_done);
2603 io_ring_ctx_free(ctx);
2604}
2605
2606static int io_uring_release(struct inode *inode, struct file *file)
2607{
2608 struct io_ring_ctx *ctx = file->private_data;
2609
2610 file->private_data = NULL;
2611 io_ring_ctx_wait_and_kill(ctx);
2612 return 0;
2613}
2614
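/*
 * mmap() of the ring fd: the pgoff cookie selects which kernel allocation
 * (SQ ring, SQE array or CQ ring) gets remapped into the caller, and the
 * requested size must not exceed the backing compound page.
 */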
2615static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
2616{
2617 loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
2618 unsigned long sz = vma->vm_end - vma->vm_start;
2619 struct io_ring_ctx *ctx = file->private_data;
2620 unsigned long pfn;
2621 struct page *page;
2622 void *ptr;
2623
2624 switch (offset) {
2625 case IORING_OFF_SQ_RING:
2626 ptr = ctx->sq_ring;
2627 break;
2628 case IORING_OFF_SQES:
2629 ptr = ctx->sq_sqes;
2630 break;
2631 case IORING_OFF_CQ_RING:
2632 ptr = ctx->cq_ring;
2633 break;
2634 default:
2635 return -EINVAL;
2636 }
2637
2638 page = virt_to_head_page(ptr);
2639 if (sz > (PAGE_SIZE << compound_order(page)))
2640 return -EINVAL;
2641
2642 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
2643 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
2644}
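/*
 * For reference, a rough sketch of the userspace side of the above mapping,
 * along the lines of what liburing does (not kernel code; sizes come from
 * the io_uring_params filled in at setup time):
 *
 *	sq = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *		  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		  ring_fd, IORING_OFF_SQ_RING);
 *	sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
 *		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		    ring_fd, IORING_OFF_SQES);
 *	cq = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
 *		  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		  ring_fd, IORING_OFF_CQ_RING);
 */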
2645
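/*
 * io_uring_enter() - submit new SQEs and/or wait for completions. Without
 * SQPOLL we submit up to 'to_submit' entries inline; with
 * IORING_ENTER_GETEVENTS set we then wait (or poll, for IOPOLL rings) for
 * at least 'min_complete' completions. For SQPOLL rings the kernel thread
 * owns submission, so we only wake it if IORING_ENTER_SQ_WAKEUP is set.
 */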
2646SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
2647 u32, min_complete, u32, flags, const sigset_t __user *, sig,
2648 size_t, sigsz)
2649{
2650 struct io_ring_ctx *ctx;
2651 long ret = -EBADF;
2652 int submitted = 0;
2653 struct fd f;
2654
2655	if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
2656 return -EINVAL;
2657
2658 f = fdget(fd);
2659 if (!f.file)
2660 return -EBADF;
2661
2662 ret = -EOPNOTSUPP;
2663 if (f.file->f_op != &io_uring_fops)
2664 goto out_fput;
2665
2666 ret = -ENXIO;
2667 ctx = f.file->private_data;
2668 if (!percpu_ref_tryget(&ctx->refs))
2669 goto out_fput;
2670
2671 /*
2672 * For SQ polling, the thread will do all submissions and completions.
2673 * Just return the requested submit count, and wake the thread if
2674 * we were asked to.
2675 */
2676 if (ctx->flags & IORING_SETUP_SQPOLL) {
2677 if (flags & IORING_ENTER_SQ_WAKEUP)
2678 wake_up(&ctx->sqo_wait);
2679 submitted = to_submit;
2680 goto out_ctx;
2681 }
2682
2683 ret = 0;
2684 if (to_submit) {
2685 to_submit = min(to_submit, ctx->sq_entries);
2686
2687 mutex_lock(&ctx->uring_lock);
2688 submitted = io_ring_submit(ctx, to_submit);
2689 mutex_unlock(&ctx->uring_lock);
2690
2691 if (submitted < 0)
2692 goto out_ctx;
2693 }
2694 if (flags & IORING_ENTER_GETEVENTS) {
2695 unsigned nr_events = 0;
2696
2697 min_complete = min(min_complete, ctx->cq_entries);
2698
2699 /*
2700 * The application could have included the 'to_submit' count
2701 * in how many events it wanted to wait for. If we failed to
2702 * submit the desired count, we may need to adjust the number
2703 * of events to poll/wait for.
2704 */
2705 if (submitted < to_submit)
2706 min_complete = min_t(unsigned, submitted, min_complete);
2707
2708 if (ctx->flags & IORING_SETUP_IOPOLL) {
2709 mutex_lock(&ctx->uring_lock);
2710 ret = io_iopoll_check(ctx, &nr_events, min_complete);
2711 mutex_unlock(&ctx->uring_lock);
2712 } else {
2713 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
2714 }
2715 }
2716
2717out_ctx:
2718 io_ring_drop_ctx_refs(ctx, 1);
2719out_fput:
2720 fdput(f);
2721 return submitted ? submitted : ret;
2722}
2723
2724static const struct file_operations io_uring_fops = {
2725 .release = io_uring_release,
2726 .mmap = io_uring_mmap,
2727 .poll = io_uring_poll,
2728 .fasync = io_uring_fasync,
2729};
2730
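/*
 * Allocate and initialise the SQ ring, the SQE array and the CQ ring, and
 * record masks/entry counts in the ctx. Sizes were rounded to powers of two
 * by io_uring_create().
 */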
2731static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
2732 struct io_uring_params *p)
2733{
2734 struct io_sq_ring *sq_ring;
2735 struct io_cq_ring *cq_ring;
2736 size_t size;
2737
2738 sq_ring = io_mem_alloc(struct_size(sq_ring, array, p->sq_entries));
2739 if (!sq_ring)
2740 return -ENOMEM;
2741
2742 ctx->sq_ring = sq_ring;
2743 sq_ring->ring_mask = p->sq_entries - 1;
2744 sq_ring->ring_entries = p->sq_entries;
2745 ctx->sq_mask = sq_ring->ring_mask;
2746 ctx->sq_entries = sq_ring->ring_entries;
2747
2748 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
2749 if (size == SIZE_MAX)
2750 return -EOVERFLOW;
2751
2752 ctx->sq_sqes = io_mem_alloc(size);
2753 if (!ctx->sq_sqes) {
2754 io_mem_free(ctx->sq_ring);
2755 return -ENOMEM;
2756 }
2757
2758 cq_ring = io_mem_alloc(struct_size(cq_ring, cqes, p->cq_entries));
2759 if (!cq_ring) {
2760 io_mem_free(ctx->sq_ring);
2761 io_mem_free(ctx->sq_sqes);
2762 return -ENOMEM;
2763 }
2764
2765 ctx->cq_ring = cq_ring;
2766 cq_ring->ring_mask = p->cq_entries - 1;
2767 cq_ring->ring_entries = p->cq_entries;
2768 ctx->cq_mask = cq_ring->ring_mask;
2769 ctx->cq_entries = cq_ring->ring_entries;
2770 return 0;
2771}
2772
2773/*
2774 * Allocate an anonymous fd; this is what constitutes the application-
2775 * visible backing of an io_uring instance. The application mmaps this
2776 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
2777 * we have to tie this fd to a socket for file garbage collection purposes.
2778 */
2779static int io_uring_get_fd(struct io_ring_ctx *ctx)
2780{
2781 struct file *file;
2782 int ret;
2783
2784#if defined(CONFIG_UNIX)
2785 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
2786 &ctx->ring_sock);
2787 if (ret)
2788 return ret;
2789#endif
2790
2791 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
2792 if (ret < 0)
2793 goto err;
2794
2795 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
2796 O_RDWR | O_CLOEXEC);
2797 if (IS_ERR(file)) {
2798 put_unused_fd(ret);
2799 ret = PTR_ERR(file);
2800 goto err;
2801 }
2802
2803#if defined(CONFIG_UNIX)
2804 ctx->ring_sock->file = file;
2805	ctx->ring_sock->sk->sk_user_data = ctx;
2806#endif
2807 fd_install(ret, file);
2808 return ret;
2809err:
2810#if defined(CONFIG_UNIX)
2811 sock_release(ctx->ring_sock);
2812 ctx->ring_sock = NULL;
2813#endif
2814 return ret;
2815}
2816
2817static int io_uring_create(unsigned entries, struct io_uring_params *p)
2818{
2819 struct user_struct *user = NULL;
2820 struct io_ring_ctx *ctx;
2821 bool account_mem;
2822 int ret;
2823
2824 if (!entries || entries > IORING_MAX_ENTRIES)
2825 return -EINVAL;
2826
2827 /*
2828 * Use twice as many entries for the CQ ring. It's possible for the
2829 * application to drive a higher depth than the size of the SQ ring,
2830 * since the sqes are only used at submission time. This allows for
2831 * some flexibility in overcommitting a bit.
2832 */
2833 p->sq_entries = roundup_pow_of_two(entries);
2834 p->cq_entries = 2 * p->sq_entries;
2835
2836 user = get_uid(current_user());
2837 account_mem = !capable(CAP_IPC_LOCK);
2838
2839 if (account_mem) {
2840 ret = io_account_mem(user,
2841 ring_pages(p->sq_entries, p->cq_entries));
2842 if (ret) {
2843 free_uid(user);
2844 return ret;
2845 }
2846 }
2847
2848 ctx = io_ring_ctx_alloc(p);
2849 if (!ctx) {
2850 if (account_mem)
2851 io_unaccount_mem(user, ring_pages(p->sq_entries,
2852 p->cq_entries));
2853 free_uid(user);
2854 return -ENOMEM;
2855 }
2856 ctx->compat = in_compat_syscall();
2857 ctx->account_mem = account_mem;
2858 ctx->user = user;
2859
2860 ret = io_allocate_scq_urings(ctx, p);
2861 if (ret)
2862 goto err;
2863
2864	ret = io_sq_offload_start(ctx, p);
2865 if (ret)
2866 goto err;
2867
2868 ret = io_uring_get_fd(ctx);
2869 if (ret < 0)
2870 goto err;
2871
2872 memset(&p->sq_off, 0, sizeof(p->sq_off));
2873 p->sq_off.head = offsetof(struct io_sq_ring, r.head);
2874 p->sq_off.tail = offsetof(struct io_sq_ring, r.tail);
2875 p->sq_off.ring_mask = offsetof(struct io_sq_ring, ring_mask);
2876 p->sq_off.ring_entries = offsetof(struct io_sq_ring, ring_entries);
2877 p->sq_off.flags = offsetof(struct io_sq_ring, flags);
2878 p->sq_off.dropped = offsetof(struct io_sq_ring, dropped);
2879 p->sq_off.array = offsetof(struct io_sq_ring, array);
2880
2881 memset(&p->cq_off, 0, sizeof(p->cq_off));
2882 p->cq_off.head = offsetof(struct io_cq_ring, r.head);
2883 p->cq_off.tail = offsetof(struct io_cq_ring, r.tail);
2884 p->cq_off.ring_mask = offsetof(struct io_cq_ring, ring_mask);
2885 p->cq_off.ring_entries = offsetof(struct io_cq_ring, ring_entries);
2886 p->cq_off.overflow = offsetof(struct io_cq_ring, overflow);
2887 p->cq_off.cqes = offsetof(struct io_cq_ring, cqes);
2888 return ret;
2889err:
2890 io_ring_ctx_wait_and_kill(ctx);
2891 return ret;
2892}
2893
2894/*
2895 * Sets up an io_uring context and returns the fd. The application asks for a
2896 * ring size; we return the actual sq/cq ring sizes (among other things) in the
2897 * params structure passed in.
2898 */
2899static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
2900{
2901 struct io_uring_params p;
2902 long ret;
2903 int i;
2904
2905 if (copy_from_user(&p, params, sizeof(p)))
2906 return -EFAULT;
2907 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
2908 if (p.resv[i])
2909 return -EINVAL;
2910 }
2911
2912 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
2913 IORING_SETUP_SQ_AFF))
2914 return -EINVAL;
2915
2916 ret = io_uring_create(entries, &p);
2917 if (ret < 0)
2918 return ret;
2919
2920 if (copy_to_user(params, &p, sizeof(p)))
2921 return -EFAULT;
2922
2923 return ret;
2924}
2925
2926SYSCALL_DEFINE2(io_uring_setup, u32, entries,
2927 struct io_uring_params __user *, params)
2928{
2929 return io_uring_setup(entries, params);
2930}
2931
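/*
 * Resource registration: quiesce the ctx by killing the percpu refs and
 * waiting for in-flight requests to drain, perform the requested
 * (un)registration, then revive the refs.
 */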
2932static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
2933 void __user *arg, unsigned nr_args)
2934{
2935 int ret;
2936
2937 percpu_ref_kill(&ctx->refs);
2938 wait_for_completion(&ctx->ctx_done);
2939
2940 switch (opcode) {
2941 case IORING_REGISTER_BUFFERS:
2942 ret = io_sqe_buffer_register(ctx, arg, nr_args);
2943 break;
2944 case IORING_UNREGISTER_BUFFERS:
2945 ret = -EINVAL;
2946 if (arg || nr_args)
2947 break;
2948 ret = io_sqe_buffer_unregister(ctx);
2949 break;
2950 case IORING_REGISTER_FILES:
2951 ret = io_sqe_files_register(ctx, arg, nr_args);
2952 break;
2953 case IORING_UNREGISTER_FILES:
2954 ret = -EINVAL;
2955 if (arg || nr_args)
2956 break;
2957 ret = io_sqe_files_unregister(ctx);
2958 break;
2959 default:
2960 ret = -EINVAL;
2961 break;
2962 }
2963
2964 /* bring the ctx back to life */
2965 reinit_completion(&ctx->ctx_done);
2966 percpu_ref_reinit(&ctx->refs);
2967 return ret;
2968}
2969
2970SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
2971 void __user *, arg, unsigned int, nr_args)
2972{
2973 struct io_ring_ctx *ctx;
2974 long ret = -EBADF;
2975 struct fd f;
2976
2977 f = fdget(fd);
2978 if (!f.file)
2979 return -EBADF;
2980
2981 ret = -EOPNOTSUPP;
2982 if (f.file->f_op != &io_uring_fops)
2983 goto out_fput;
2984
2985 ctx = f.file->private_data;
2986
2987 mutex_lock(&ctx->uring_lock);
2988 ret = __io_uring_register(ctx, opcode, arg, nr_args);
2989 mutex_unlock(&ctx->uring_lock);
2990out_fput:
2991 fdput(f);
2992 return ret;
2993}
2994
2995static int __init io_uring_init(void)
2996{
2997 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
2998 return 0;
2999};
3000__initcall(io_uring_init);