io_uring: allow sparse fixed file sets
[linux-block.git] / fs / io_uring.c
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
7 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqring (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
29 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes, but also to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
40 * Copyright (c) 2018-2019 Christoph Hellwig
41 */
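/*
 * Editorial sketch, not part of this file: one way a userspace submitter
 * might follow the ordering rules above when publishing an SQE and reaping
 * a CQE. The ring/field names are assumptions loosely modeled on liburing,
 * not a definitive API.
 *
 *	// publish one SQE: only the application writes the SQ tail
 *	unsigned tail = *sq.ktail;
 *	sq.array[tail & *sq.kring_mask] = sqe_index;
 *	smp_store_release(sq.ktail, tail + 1);	// order SQE stores before tail
 *
 *	// reap one CQE: the acquire pairs with the kernel's tail publish
 *	unsigned head = *cq.khead;
 *	if (head != smp_load_acquire(cq.ktail)) {
 *		struct io_uring_cqe *cqe = &cq.cqes[head & *cq.kring_mask];
 *		handle(cqe);
 *		smp_store_release(cq.khead, head + 1);	// order CQE loads before head
 *	}
 */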
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/errno.h>
45#include <linux/syscalls.h>
46#include <linux/compat.h>
47#include <linux/refcount.h>
48#include <linux/uio.h>
49
50#include <linux/sched/signal.h>
51#include <linux/fs.h>
52#include <linux/file.h>
53#include <linux/fdtable.h>
54#include <linux/mm.h>
55#include <linux/mman.h>
56#include <linux/mmu_context.h>
57#include <linux/percpu.h>
58#include <linux/slab.h>
59#include <linux/workqueue.h>
60#include <linux/kthread.h>
61#include <linux/blkdev.h>
62#include <linux/bvec.h>
63#include <linux/net.h>
64#include <net/sock.h>
65#include <net/af_unix.h>
66#include <net/scm.h>
67#include <linux/anon_inodes.h>
68#include <linux/sched/mm.h>
69#include <linux/uaccess.h>
70#include <linux/nospec.h>
71#include <linux/sizes.h>
72#include <linux/hugetlb.h>
73
74#include <uapi/linux/io_uring.h>
75
76#include "internal.h"
77
78#define IORING_MAX_ENTRIES 32768
79#define IORING_MAX_FIXED_FILES 1024
80
81struct io_uring {
82 u32 head ____cacheline_aligned_in_smp;
83 u32 tail ____cacheline_aligned_in_smp;
84};
85
86/*
87 * This data is shared with the application through the mmap at offsets
88 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
89 *
90 * The offsets to the member fields are published through struct
91 * io_sqring_offsets when calling io_uring_setup.
92 */
93struct io_rings {
94 /*
95 * Head and tail offsets into the ring; the offsets need to be
96 * masked to get valid indices.
97 *
98 * The kernel controls head of the sq ring and the tail of the cq ring,
99 * and the application controls tail of the sq ring and the head of the
100 * cq ring.
101 */
102 struct io_uring sq, cq;
103 /*
104 * Bitmasks to apply to head and tail offsets (constant, equals
105 * ring_entries - 1)
106 */
107 u32 sq_ring_mask, cq_ring_mask;
108 /* Ring sizes (constant, power of 2) */
109 u32 sq_ring_entries, cq_ring_entries;
110 /*
111 * Number of invalid entries dropped by the kernel due to an
112 * invalid index stored in the array
113 *
114 * Written by the kernel, shouldn't be modified by the
115 * application (i.e. get number of "new events" by comparing to
116 * cached value).
117 *
118 * After a new SQ head value was read by the application this
119 * counter includes all submissions that were dropped reaching
120 * the new SQ head (and possibly more).
121 */
122 u32 sq_dropped;
123 /*
124 * Runtime flags
125 *
126 * Written by the kernel, shouldn't be modified by the
127 * application.
128 *
129 * The application needs a full memory barrier before checking
130 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
131 */
132 u32 sq_flags;
133 /*
134 * Number of completion events lost because the queue was full;
135 * this should be avoided by the application by making sure
136 * there are not more requests pending than there is space in
137 * the completion queue.
138 *
139 * Written by the kernel, shouldn't be modified by the
140 * application (i.e. get number of "new events" by comparing to
141 * cached value).
142 *
143 * As completion events come in out of order this counter is not
144 * ordered with any other data.
145 */
146 u32 cq_overflow;
147 /*
148 * Ring buffer of completion events.
149 *
150 * The kernel writes completion events fresh every time they are
151 * produced, so the application is allowed to modify pending
152 * entries.
153 */
154 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
155};
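/*
 * Editorial sketch, not part of this file: how an application typically
 * maps struct io_rings after io_uring_setup(). 'p' is the io_uring_params
 * the kernel filled in and 'ring_fd' the returned descriptor; error
 * handling is omitted.
 *
 *	void *sq_ptr = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *			    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			    ring_fd, IORING_OFF_SQ_RING);
 *	unsigned *sq_tail = sq_ptr + p.sq_off.tail;
 *	unsigned *sq_mask = sq_ptr + p.sq_off.ring_mask;
 *
 * The CQ side is mapped the same way at IORING_OFF_CQ_RING via p.cq_off.
 */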
156
157struct io_mapped_ubuf {
158 u64 ubuf;
159 size_t len;
160 struct bio_vec *bvec;
161 unsigned int nr_bvecs;
162};
163
164struct async_list {
165 spinlock_t lock;
166 atomic_t cnt;
167 struct list_head list;
168
169 struct file *file;
170 off_t io_start;
171 size_t io_len;
172};
173
174struct io_ring_ctx {
175 struct {
176 struct percpu_ref refs;
177 } ____cacheline_aligned_in_smp;
178
179 struct {
180 unsigned int flags;
181 bool compat;
182 bool account_mem;
183
184 /*
185 * Ring buffer of indices into array of io_uring_sqe, which is
186 * mmapped by the application using the IORING_OFF_SQES offset.
187 *
188 * This indirection could e.g. be used to assign fixed
189 * io_uring_sqe entries to operations and only submit them to
190 * the queue when needed.
191 *
192 * The kernel modifies neither the indices array nor the entries
193 * array.
194 */
195 u32 *sq_array;
196 unsigned cached_sq_head;
197 unsigned sq_entries;
198 unsigned sq_mask;
199 unsigned sq_thread_idle;
200 unsigned cached_sq_dropped;
201 struct io_uring_sqe *sq_sqes;
202
203 struct list_head defer_list;
204 struct list_head timeout_list;
205 } ____cacheline_aligned_in_smp;
206
207 /* IO offload */
208 struct workqueue_struct *sqo_wq[2];
209 struct task_struct *sqo_thread; /* if using sq thread polling */
210 struct mm_struct *sqo_mm;
211 wait_queue_head_t sqo_wait;
212 struct completion sqo_thread_started;
213
214 struct {
215 unsigned cached_cq_tail;
216 atomic_t cached_cq_overflow;
217 unsigned cq_entries;
218 unsigned cq_mask;
219 struct wait_queue_head cq_wait;
220 struct fasync_struct *cq_fasync;
221 struct eventfd_ctx *cq_ev_fd;
222 atomic_t cq_timeouts;
223 } ____cacheline_aligned_in_smp;
224
225 struct io_rings *rings;
226
227 /*
228 * If used, fixed file set. Writers must ensure that ->refs is dead,
229 * readers must ensure that ->refs is alive as long as the file* is
230 * used. Only updated through io_uring_register(2).
231 */
232 struct file **user_files;
233 unsigned nr_user_files;
234
235 /* if used, fixed mapped user buffers */
236 unsigned nr_user_bufs;
237 struct io_mapped_ubuf *user_bufs;
238
239 struct user_struct *user;
240
241 struct completion ctx_done;
242
243 struct {
244 struct mutex uring_lock;
245 wait_queue_head_t wait;
246 } ____cacheline_aligned_in_smp;
247
248 struct {
249 spinlock_t completion_lock;
250 bool poll_multi_file;
251 /*
252 * ->poll_list is protected by the ctx->uring_lock for
253 * io_uring instances that don't use IORING_SETUP_SQPOLL.
254 * For SQPOLL, only the single threaded io_sq_thread() will
255 * manipulate the list, hence no extra locking is needed there.
256 */
257 struct list_head poll_list;
258 struct list_head cancel_list;
259 } ____cacheline_aligned_in_smp;
260
261 struct async_list pending_async[2];
262
263#if defined(CONFIG_UNIX)
264 struct socket *ring_sock;
265#endif
266};
267
268struct sqe_submit {
269 const struct io_uring_sqe *sqe;
270 unsigned short index;
271 u32 sequence;
272 bool has_user;
273 bool needs_lock;
274 bool needs_fixed_file;
275};
276
277/*
278 * First field must be the file pointer in all the
279 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
280 */
281struct io_poll_iocb {
282 struct file *file;
283 struct wait_queue_head *head;
284 __poll_t events;
285 bool done;
286 bool canceled;
287 struct wait_queue_entry wait;
288};
289
290struct io_timeout {
291 struct file *file;
292 struct hrtimer timer;
293};
294
295/*
296 * NOTE! Each of the iocb union members has the file pointer
297 * as the first entry in their struct definition. So you can
298 * access the file pointer through any of the sub-structs,
299 * or directly as just 'ki_filp' in this struct.
300 */
301struct io_kiocb {
302 union {
303 struct file *file;
304 struct kiocb rw;
305 struct io_poll_iocb poll;
306 struct io_timeout timeout;
307 };
308
309 struct sqe_submit submit;
310
311 struct io_ring_ctx *ctx;
312 struct list_head list;
313 struct list_head link_list;
314 unsigned int flags;
315 refcount_t refs;
316#define REQ_F_NOWAIT 1 /* must not punt to workers */
317#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
318#define REQ_F_FIXED_FILE 4 /* ctx owns file */
319#define REQ_F_SEQ_PREV 8 /* sequential with previous */
320#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
321#define REQ_F_IO_DRAINED 32 /* drain done */
322#define REQ_F_LINK 64 /* linked sqes */
323#define REQ_F_LINK_DONE 128 /* linked sqes done */
324#define REQ_F_FAIL_LINK 256 /* fail rest of links */
325#define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */
326#define REQ_F_TIMEOUT 1024 /* timeout request */
327#define REQ_F_ISREG 2048 /* regular file */
328#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
329 u64 user_data;
330 u32 result;
331 u32 sequence;
332
333 struct work_struct work;
334};
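/*
 * Editorial note, not part of this file: because each union member above
 * places its struct file * first, the same pointer can be read as any of
 *
 *	req->file == req->rw.ki_filp == req->poll.file == req->timeout.file
 *
 * which is what lets generic code use req->file without knowing which
 * opcode the request carries.
 */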
335
336#define IO_PLUG_THRESHOLD 2
337#define IO_IOPOLL_BATCH 8
338
339struct io_submit_state {
340 struct blk_plug plug;
341
342 /*
343 * io_kiocb alloc cache
344 */
345 void *reqs[IO_IOPOLL_BATCH];
346 unsigned int free_reqs;
347 unsigned int cur_req;
348
349 /*
350 * File reference cache
351 */
352 struct file *file;
353 unsigned int fd;
354 unsigned int has_refs;
355 unsigned int used_refs;
356 unsigned int ios_left;
357};
358
359static void io_sq_wq_submit_work(struct work_struct *work);
360static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
361 long res);
362static void __io_free_req(struct io_kiocb *req);
363
364static struct kmem_cache *req_cachep;
365
366static const struct file_operations io_uring_fops;
367
368struct sock *io_uring_get_socket(struct file *file)
369{
370#if defined(CONFIG_UNIX)
371 if (file->f_op == &io_uring_fops) {
372 struct io_ring_ctx *ctx = file->private_data;
373
374 return ctx->ring_sock->sk;
375 }
376#endif
377 return NULL;
378}
379EXPORT_SYMBOL(io_uring_get_socket);
380
381static void io_ring_ctx_ref_free(struct percpu_ref *ref)
382{
383 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
384
385 complete(&ctx->ctx_done);
386}
387
388static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
389{
390 struct io_ring_ctx *ctx;
391 int i;
392
393 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
394 if (!ctx)
395 return NULL;
396
397 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
398 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
399 kfree(ctx);
400 return NULL;
401 }
402
403 ctx->flags = p->flags;
404 init_waitqueue_head(&ctx->cq_wait);
405 init_completion(&ctx->ctx_done);
406 init_completion(&ctx->sqo_thread_started);
407 mutex_init(&ctx->uring_lock);
408 init_waitqueue_head(&ctx->wait);
409 for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) {
410 spin_lock_init(&ctx->pending_async[i].lock);
411 INIT_LIST_HEAD(&ctx->pending_async[i].list);
412 atomic_set(&ctx->pending_async[i].cnt, 0);
413 }
414 spin_lock_init(&ctx->completion_lock);
415 INIT_LIST_HEAD(&ctx->poll_list);
416 INIT_LIST_HEAD(&ctx->cancel_list);
417 INIT_LIST_HEAD(&ctx->defer_list);
418 INIT_LIST_HEAD(&ctx->timeout_list);
419 return ctx;
420}
421
422static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
423 struct io_kiocb *req)
424{
425 return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
426 + atomic_read(&ctx->cached_cq_overflow);
427}
428
429static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
430 struct io_kiocb *req)
431{
432 if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
433 return false;
434
435 return __io_sequence_defer(ctx, req);
436}
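/*
 * Editorial worked example, numbers hypothetical: a drain request stamped
 * with sequence 10 stays deferred while only 7 CQEs have been posted
 * (10 != 7 + 0 + 0). Dropped SQEs and overflowed CQEs are counted on the
 * completed side because they will never post a CQE of their own.
 */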
437
438static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
439{
440 struct io_kiocb *req;
441
442 req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
443 if (req && !io_sequence_defer(ctx, req)) {
444 list_del_init(&req->list);
445 return req;
446 }
447
448 return NULL;
449}
450
451static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
452{
453 struct io_kiocb *req;
454
455 req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
456 if (req && !__io_sequence_defer(ctx, req)) {
457 list_del_init(&req->list);
458 return req;
459 }
460
461 return NULL;
462}
463
464static void __io_commit_cqring(struct io_ring_ctx *ctx)
465{
466 struct io_rings *rings = ctx->rings;
467
468 if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
469 /* order cqe stores with ring update */
470 smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
471
472 if (wq_has_sleeper(&ctx->cq_wait)) {
473 wake_up_interruptible(&ctx->cq_wait);
474 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
475 }
476 }
477}
478
479static inline void io_queue_async_work(struct io_ring_ctx *ctx,
480 struct io_kiocb *req)
481{
482 int rw = 0;
483
484 if (req->submit.sqe) {
485 switch (req->submit.sqe->opcode) {
486 case IORING_OP_WRITEV:
487 case IORING_OP_WRITE_FIXED:
488 rw = !(req->rw.ki_flags & IOCB_DIRECT);
489 break;
490 }
491 }
492
493 queue_work(ctx->sqo_wq[rw], &req->work);
494}
495
496static void io_kill_timeout(struct io_kiocb *req)
497{
498 int ret;
499
500 ret = hrtimer_try_to_cancel(&req->timeout.timer);
501 if (ret != -1) {
502 atomic_inc(&req->ctx->cq_timeouts);
503 list_del(&req->list);
504 io_cqring_fill_event(req->ctx, req->user_data, 0);
505 __io_free_req(req);
506 }
507}
508
509static void io_kill_timeouts(struct io_ring_ctx *ctx)
510{
511 struct io_kiocb *req, *tmp;
512
513 spin_lock_irq(&ctx->completion_lock);
514 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
515 io_kill_timeout(req);
516 spin_unlock_irq(&ctx->completion_lock);
517}
518
519static void io_commit_cqring(struct io_ring_ctx *ctx)
520{
521 struct io_kiocb *req;
522
523 while ((req = io_get_timeout_req(ctx)) != NULL)
524 io_kill_timeout(req);
525
526 __io_commit_cqring(ctx);
527
528 while ((req = io_get_deferred_req(ctx)) != NULL) {
529 if (req->flags & REQ_F_SHADOW_DRAIN) {
530 /* Just for drain, free it. */
531 __io_free_req(req);
532 continue;
533 }
534 req->flags |= REQ_F_IO_DRAINED;
535 io_queue_async_work(ctx, req);
536 }
537}
538
539static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
540{
541 struct io_rings *rings = ctx->rings;
542 unsigned tail;
543
544 tail = ctx->cached_cq_tail;
545 /*
546 * writes to the cq entry need to come after reading head; the
547 * control dependency is enough as we're using WRITE_ONCE to
548 * fill the cq entry
549 */
550 if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
551 return NULL;
552
553 ctx->cached_cq_tail++;
554 return &rings->cqes[tail & ctx->cq_mask];
555}
556
557static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
558 long res)
559{
560 struct io_uring_cqe *cqe;
561
562 /*
563 * If we can't get a cq entry, userspace overflowed the
564 * submission (by quite a lot). Increment the overflow count in
565 * the ring.
566 */
567 cqe = io_get_cqring(ctx);
568 if (cqe) {
569 WRITE_ONCE(cqe->user_data, ki_user_data);
570 WRITE_ONCE(cqe->res, res);
571 WRITE_ONCE(cqe->flags, 0);
572 } else {
573 WRITE_ONCE(ctx->rings->cq_overflow,
574 atomic_inc_return(&ctx->cached_cq_overflow));
575 }
576}
577
578static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
579{
580 if (waitqueue_active(&ctx->wait))
581 wake_up(&ctx->wait);
582 if (waitqueue_active(&ctx->sqo_wait))
583 wake_up(&ctx->sqo_wait);
584 if (ctx->cq_ev_fd)
585 eventfd_signal(ctx->cq_ev_fd, 1);
586}
587
588static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
589 long res)
590{
591 unsigned long flags;
592
593 spin_lock_irqsave(&ctx->completion_lock, flags);
594 io_cqring_fill_event(ctx, user_data, res);
595 io_commit_cqring(ctx);
596 spin_unlock_irqrestore(&ctx->completion_lock, flags);
597
598 io_cqring_ev_posted(ctx);
599}
600
601static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
602 struct io_submit_state *state)
603{
604 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
605 struct io_kiocb *req;
606
607 if (!percpu_ref_tryget(&ctx->refs))
608 return NULL;
609
610 if (!state) {
611 req = kmem_cache_alloc(req_cachep, gfp);
612 if (unlikely(!req))
613 goto out;
614 } else if (!state->free_reqs) {
615 size_t sz;
616 int ret;
617
618 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
619 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
620
621 /*
622 * Bulk alloc is all-or-nothing. If we fail to get a batch,
623 * retry single alloc to be on the safe side.
624 */
625 if (unlikely(ret <= 0)) {
626 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
627 if (!state->reqs[0])
628 goto out;
629 ret = 1;
630 }
631 state->free_reqs = ret - 1;
632 state->cur_req = 1;
633 req = state->reqs[0];
634 } else {
635 req = state->reqs[state->cur_req];
636 state->free_reqs--;
637 state->cur_req++;
638 }
639
640 req->file = NULL;
641 req->ctx = ctx;
642 req->flags = 0;
643 /* one is dropped after submission, the other at completion */
644 refcount_set(&req->refs, 2);
645 req->result = 0;
646 return req;
647out:
648 percpu_ref_put(&ctx->refs);
649 return NULL;
650}
651
652static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
653{
654 if (*nr) {
655 kmem_cache_free_bulk(req_cachep, *nr, reqs);
656 percpu_ref_put_many(&ctx->refs, *nr);
657 *nr = 0;
658 }
659}
660
661static void __io_free_req(struct io_kiocb *req)
662{
663 if (req->file && !(req->flags & REQ_F_FIXED_FILE))
664 fput(req->file);
665 percpu_ref_put(&req->ctx->refs);
666 kmem_cache_free(req_cachep, req);
667}
668
669static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
670{
671 struct io_kiocb *nxt;
672
673 /*
674 * The list should never be empty when we are called here, but it
675 * could potentially happen if the chain is messed up; check to be on
676 * the safe side.
677 */
678 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
679 if (nxt) {
680 list_del(&nxt->list);
681 if (!list_empty(&req->link_list)) {
682 INIT_LIST_HEAD(&nxt->link_list);
683 list_splice(&req->link_list, &nxt->link_list);
684 nxt->flags |= REQ_F_LINK;
685 }
686
687 nxt->flags |= REQ_F_LINK_DONE;
688 /*
689 * If we're in async work, we can continue processing the chain
690 * in this context instead of having to queue up new async work.
691 */
692 if (nxtptr && current_work()) {
693 *nxtptr = nxt;
694 } else {
695 INIT_WORK(&nxt->work, io_sq_wq_submit_work);
696 io_queue_async_work(req->ctx, nxt);
697 }
698 }
699}
700
701/*
702 * Called if REQ_F_LINK is set, and we fail the head request
703 */
704static void io_fail_links(struct io_kiocb *req)
705{
706 struct io_kiocb *link;
707
708 while (!list_empty(&req->link_list)) {
709 link = list_first_entry(&req->link_list, struct io_kiocb, list);
710 list_del(&link->list);
711
712 io_cqring_add_event(req->ctx, link->user_data, -ECANCELED);
713 __io_free_req(link);
714 }
715}
716
717static void io_free_req(struct io_kiocb *req, struct io_kiocb **nxt)
718{
719 /*
720 * If LINK is set, we have dependent requests in this chain. If we
721 * didn't fail this request, queue the first one up, moving any other
722 * dependencies to the next request. In case of failure, fail the rest
723 * of the chain.
724 */
725 if (req->flags & REQ_F_LINK) {
726 if (req->flags & REQ_F_FAIL_LINK)
727 io_fail_links(req);
728 else
729 io_req_link_next(req, nxt);
730 }
731
732 __io_free_req(req);
733}
734
735/*
736 * Drop reference to request, return next in chain (if there is one) if this
737 * was the last reference to this request.
738 */
739static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
740{
741 struct io_kiocb *nxt = NULL;
742
743 if (refcount_dec_and_test(&req->refs))
744 io_free_req(req, &nxt);
745
746 return nxt;
747}
748
749static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr)
750{
751 struct io_kiocb *nxt;
752
753 nxt = io_put_req_find_next(req);
754 if (nxt) {
755 if (nxtptr) {
756 *nxtptr = nxt;
757 } else {
758 INIT_WORK(&nxt->work, io_sq_wq_submit_work);
759 io_queue_async_work(nxt->ctx, nxt);
760 }
761 }
762}
763
764static unsigned io_cqring_events(struct io_rings *rings)
765{
766 /* See comment at the top of this file */
767 smp_rmb();
768 return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
769}
770
771static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
772{
773 struct io_rings *rings = ctx->rings;
774
775 /* make sure SQ entry isn't read before tail */
776 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
777}
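/*
 * Editorial note, not part of this file: the unsigned subtractions above
 * remain correct across index wraparound; e.g. with head == 0xfffffffe
 * and tail == 0x00000001, tail - head == 3 pending entries.
 */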
778
779/*
780 * Find and free completed poll iocbs
781 */
782static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
783 struct list_head *done)
784{
785 void *reqs[IO_IOPOLL_BATCH];
786 struct io_kiocb *req;
787 int to_free;
788
789 to_free = 0;
790 while (!list_empty(done)) {
791 req = list_first_entry(done, struct io_kiocb, list);
792 list_del(&req->list);
793
794 io_cqring_fill_event(ctx, req->user_data, req->result);
795 (*nr_events)++;
796
797 if (refcount_dec_and_test(&req->refs)) {
798 /* If we're not using fixed files, we have to pair the
799 * completion part with the file put. Use regular
800 * completions for those, only batch free for fixed
801 * file and non-linked commands.
802 */
803 if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
804 REQ_F_FIXED_FILE) {
805 reqs[to_free++] = req;
806 if (to_free == ARRAY_SIZE(reqs))
807 io_free_req_many(ctx, reqs, &to_free);
808 } else {
809 io_free_req(req, NULL);
810 }
811 }
812 }
813
814 io_commit_cqring(ctx);
815 io_free_req_many(ctx, reqs, &to_free);
816}
817
818static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
819 long min)
820{
821 struct io_kiocb *req, *tmp;
822 LIST_HEAD(done);
823 bool spin;
824 int ret;
825
826 /*
827 * Only spin for completions if we don't have multiple devices hanging
828 * off our complete list, and we're under the requested amount.
829 */
830 spin = !ctx->poll_multi_file && *nr_events < min;
831
832 ret = 0;
833 list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
834 struct kiocb *kiocb = &req->rw;
835
836 /*
837 * Move completed entries to our local list. If we find a
838 * request that requires polling, break out and complete
839 * the done list first, if we have entries there.
840 */
841 if (req->flags & REQ_F_IOPOLL_COMPLETED) {
842 list_move_tail(&req->list, &done);
843 continue;
844 }
845 if (!list_empty(&done))
846 break;
847
848 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
849 if (ret < 0)
850 break;
851
852 if (ret && spin)
853 spin = false;
854 ret = 0;
855 }
856
857 if (!list_empty(&done))
858 io_iopoll_complete(ctx, nr_events, &done);
859
860 return ret;
861}
862
863/*
864 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
865 * non-spinning poll check - we'll still enter the driver poll loop, but only
866 * as a non-spinning completion check.
867 */
868static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
869 long min)
870{
871 while (!list_empty(&ctx->poll_list) && !need_resched()) {
872 int ret;
873
874 ret = io_do_iopoll(ctx, nr_events, min);
875 if (ret < 0)
876 return ret;
877 if (!min || *nr_events >= min)
878 return 0;
879 }
880
881 return 1;
882}
883
884/*
885 * We can't just wait for polled events to come to us, we have to actively
886 * find and complete them.
887 */
888static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
889{
890 if (!(ctx->flags & IORING_SETUP_IOPOLL))
891 return;
892
893 mutex_lock(&ctx->uring_lock);
894 while (!list_empty(&ctx->poll_list)) {
895 unsigned int nr_events = 0;
896
897 io_iopoll_getevents(ctx, &nr_events, 1);
898
899 /*
900 * Ensure we allow local-to-the-cpu processing to take place,
901 * in this case we need to ensure that we reap all events.
902 */
903 cond_resched();
904 }
905 mutex_unlock(&ctx->uring_lock);
906}
907
908static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
909 long min)
910{
911 int iters = 0, ret = 0;
912
913 do {
914 int tmin = 0;
915
916 /*
917 * Don't enter poll loop if we already have events pending.
918 * If we do, we can potentially be spinning for commands that
919 * already triggered a CQE (eg in error).
920 */
921 if (io_cqring_events(ctx->rings))
922 break;
923
924 /*
925 * If a submit got punted to a workqueue, we can have the
926 * application entering polling for a command before it gets
927 * issued. That app will hold the uring_lock for the duration
928 * of the poll right here, so we need to take a breather every
929 * now and then to ensure that the issue has a chance to add
930 * the poll to the issued list. Otherwise we can spin here
931 * forever, while the workqueue is stuck trying to acquire the
932 * very same mutex.
933 */
934 if (!(++iters & 7)) {
935 mutex_unlock(&ctx->uring_lock);
936 mutex_lock(&ctx->uring_lock);
937 }
938
939 if (*nr_events < min)
940 tmin = min - *nr_events;
941
942 ret = io_iopoll_getevents(ctx, nr_events, tmin);
943 if (ret <= 0)
944 break;
945 ret = 0;
946 } while (min && !*nr_events && !need_resched());
947
948 return ret;
949}
950
951static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
952 long min)
953{
954 int ret;
955
956 /*
957 * We disallow the app entering submit/complete with polling, but we
958 * still need to lock the ring to prevent racing with polled issue
959 * that got punted to a workqueue.
960 */
961 mutex_lock(&ctx->uring_lock);
962 ret = __io_iopoll_check(ctx, nr_events, min);
963 mutex_unlock(&ctx->uring_lock);
964 return ret;
965}
966
967static void kiocb_end_write(struct io_kiocb *req)
968{
969 /*
970 * Tell lockdep we inherited freeze protection from submission
971 * thread.
972 */
973 if (req->flags & REQ_F_ISREG) {
974 struct inode *inode = file_inode(req->file);
975
976 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
977 }
978 file_end_write(req->file);
979}
980
981static void io_complete_rw_common(struct kiocb *kiocb, long res)
982{
983 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
984
985 if (kiocb->ki_flags & IOCB_WRITE)
986 kiocb_end_write(req);
987
988 if ((req->flags & REQ_F_LINK) && res != req->result)
989 req->flags |= REQ_F_FAIL_LINK;
990 io_cqring_add_event(req->ctx, req->user_data, res);
991}
992
993static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
994{
995 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
996
997 io_complete_rw_common(kiocb, res);
998 io_put_req(req, NULL);
999}
1000
1001static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
1002{
1003 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
1004
1005 io_complete_rw_common(kiocb, res);
1006 return io_put_req_find_next(req);
1007}
1008
1009static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
1010{
1011 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
1012
1013 if (kiocb->ki_flags & IOCB_WRITE)
1014 kiocb_end_write(req);
1015
1016 if ((req->flags & REQ_F_LINK) && res != req->result)
1017 req->flags |= REQ_F_FAIL_LINK;
1018 req->result = res;
1019 if (res != -EAGAIN)
1020 req->flags |= REQ_F_IOPOLL_COMPLETED;
1021}
1022
1023/*
1024 * After the iocb has been issued, it's safe to be found on the poll list.
1025 * Adding the kiocb to the list AFTER submission ensures that we don't
1026 * find it from a io_iopoll_getevents() thread before the issuer is done
1027 * accessing the kiocb cookie.
1028 */
1029static void io_iopoll_req_issued(struct io_kiocb *req)
1030{
1031 struct io_ring_ctx *ctx = req->ctx;
1032
1033 /*
1034 * Track whether we have multiple files in our lists. This will impact
1035 * how we do polling eventually, not spinning if we're on potentially
1036 * different devices.
1037 */
1038 if (list_empty(&ctx->poll_list)) {
1039 ctx->poll_multi_file = false;
1040 } else if (!ctx->poll_multi_file) {
1041 struct io_kiocb *list_req;
1042
1043 list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
1044 list);
1045 if (list_req->rw.ki_filp != req->rw.ki_filp)
1046 ctx->poll_multi_file = true;
1047 }
1048
1049 /*
1050 * For fast devices, IO may have already completed. If it has, add
1051 * it to the front so we find it first.
1052 */
1053 if (req->flags & REQ_F_IOPOLL_COMPLETED)
1054 list_add(&req->list, &ctx->poll_list);
1055 else
1056 list_add_tail(&req->list, &ctx->poll_list);
1057}
1058
1059static void io_file_put(struct io_submit_state *state)
1060{
1061 if (state->file) {
1062 int diff = state->has_refs - state->used_refs;
1063
1064 if (diff)
1065 fput_many(state->file, diff);
1066 state->file = NULL;
1067 }
1068}
1069
1070/*
1071 * Get as many references to a file as we have IOs left in this submission,
1072 * assuming most submissions are for one file, or at least that each file
1073 * has more than one submission.
1074 */
1075static struct file *io_file_get(struct io_submit_state *state, int fd)
1076{
1077 if (!state)
1078 return fget(fd);
1079
1080 if (state->file) {
1081 if (state->fd == fd) {
1082 state->used_refs++;
1083 state->ios_left--;
1084 return state->file;
1085 }
1086 io_file_put(state);
1087 }
1088 state->file = fget_many(fd, state->ios_left);
1089 if (!state->file)
1090 return NULL;
1091
1092 state->fd = fd;
1093 state->has_refs = state->ios_left;
1094 state->used_refs = 1;
1095 state->ios_left--;
1096 return state->file;
1097}
1098
1099/*
1100 * If we tracked the file through the SCM inflight mechanism, we could support
1101 * any file. For now, just ensure that anything potentially problematic is done
1102 * inline.
1103 */
1104static bool io_file_supports_async(struct file *file)
1105{
1106 umode_t mode = file_inode(file)->i_mode;
1107
1108 if (S_ISBLK(mode) || S_ISCHR(mode))
1109 return true;
1110 if (S_ISREG(mode) && file->f_op != &io_uring_fops)
1111 return true;
1112
1113 return false;
1114}
1115
1116static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
1117 bool force_nonblock)
1118{
1119 const struct io_uring_sqe *sqe = s->sqe;
1120 struct io_ring_ctx *ctx = req->ctx;
1121 struct kiocb *kiocb = &req->rw;
1122 unsigned ioprio;
1123 int ret;
1124
1125 if (!req->file)
1126 return -EBADF;
1127
1128 if (S_ISREG(file_inode(req->file)->i_mode))
1129 req->flags |= REQ_F_ISREG;
1130
1131 /*
1132 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
1133 * we know to async punt it even if it was opened O_NONBLOCK
1134 */
1135 if (force_nonblock && !io_file_supports_async(req->file)) {
1136 req->flags |= REQ_F_MUST_PUNT;
1137 return -EAGAIN;
1138 }
1139
1140 kiocb->ki_pos = READ_ONCE(sqe->off);
1141 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
1142 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
1143
1144 ioprio = READ_ONCE(sqe->ioprio);
1145 if (ioprio) {
1146 ret = ioprio_check_cap(ioprio);
1147 if (ret)
1148 return ret;
1149
1150 kiocb->ki_ioprio = ioprio;
1151 } else
1152 kiocb->ki_ioprio = get_current_ioprio();
1153
1154 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
1155 if (unlikely(ret))
1156 return ret;
1157
1158 /* don't allow async punt if RWF_NOWAIT was requested */
1159 if ((kiocb->ki_flags & IOCB_NOWAIT) ||
1160 (req->file->f_flags & O_NONBLOCK))
1161 req->flags |= REQ_F_NOWAIT;
1162
1163 if (force_nonblock)
1164 kiocb->ki_flags |= IOCB_NOWAIT;
1165
1166 if (ctx->flags & IORING_SETUP_IOPOLL) {
1167 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
1168 !kiocb->ki_filp->f_op->iopoll)
1169 return -EOPNOTSUPP;
1170
1171 kiocb->ki_flags |= IOCB_HIPRI;
1172 kiocb->ki_complete = io_complete_rw_iopoll;
1173 } else {
1174 if (kiocb->ki_flags & IOCB_HIPRI)
1175 return -EINVAL;
1176 kiocb->ki_complete = io_complete_rw;
1177 }
1178 return 0;
1179}
1180
1181static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
1182{
1183 switch (ret) {
1184 case -EIOCBQUEUED:
1185 break;
1186 case -ERESTARTSYS:
1187 case -ERESTARTNOINTR:
1188 case -ERESTARTNOHAND:
1189 case -ERESTART_RESTARTBLOCK:
1190 /*
1191 * We can't just restart the syscall, since previously
1192 * submitted sqes may already be in progress. Just fail this
1193 * IO with EINTR.
1194 */
1195 ret = -EINTR;
1196 /* fall through */
1197 default:
1198 kiocb->ki_complete(kiocb, ret, 0);
1199 }
1200}
1201
1202static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
1203 bool in_async)
1204{
1205 if (in_async && ret >= 0 && nxt && kiocb->ki_complete == io_complete_rw)
1206 *nxt = __io_complete_rw(kiocb, ret);
1207 else
1208 io_rw_done(kiocb, ret);
1209}
1210
1211static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
1212 const struct io_uring_sqe *sqe,
1213 struct iov_iter *iter)
1214{
1215 size_t len = READ_ONCE(sqe->len);
1216 struct io_mapped_ubuf *imu;
1217 unsigned index, buf_index;
1218 size_t offset;
1219 u64 buf_addr;
1220
1221 /* attempt to use fixed buffers without having provided iovecs */
1222 if (unlikely(!ctx->user_bufs))
1223 return -EFAULT;
1224
1225 buf_index = READ_ONCE(sqe->buf_index);
1226 if (unlikely(buf_index >= ctx->nr_user_bufs))
1227 return -EFAULT;
1228
1229 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
1230 imu = &ctx->user_bufs[index];
1231 buf_addr = READ_ONCE(sqe->addr);
1232
1233 /* overflow */
1234 if (buf_addr + len < buf_addr)
1235 return -EFAULT;
1236 /* not inside the mapped region */
1237 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
1238 return -EFAULT;
1239
1240 /*
1241 * May not be a start of buffer, set size appropriately
1242 * and advance us to the beginning.
1243 */
1244 offset = buf_addr - imu->ubuf;
1245 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
1246
1247 if (offset) {
1248 /*
1249 * Don't use iov_iter_advance() here, as it's really slow for
1250 * using the latter parts of a big fixed buffer - it iterates
1251 * over each segment manually. We can cheat a bit here, because
1252 * we know that:
1253 *
1254 * 1) it's a BVEC iter, we set it up
1255 * 2) all bvecs are PAGE_SIZE in size, except potentially the
1256 * first and last bvec
1257 *
1258 * So just find our index, and adjust the iterator afterwards.
1259 * If the offset is within the first bvec (or the whole first
1260 * bvec), just use iov_iter_advance(). This makes it easier
1261 * since we can just skip the first segment, which may not
1262 * be PAGE_SIZE aligned.
1263 */
1264 const struct bio_vec *bvec = imu->bvec;
1265
1266 if (offset <= bvec->bv_len) {
1267 iov_iter_advance(iter, offset);
1268 } else {
1269 unsigned long seg_skip;
1270
1271 /* skip first vec */
1272 offset -= bvec->bv_len;
1273 seg_skip = 1 + (offset >> PAGE_SHIFT);
1274
1275 iter->bvec = bvec + seg_skip;
1276 iter->nr_segs -= seg_skip;
1277 iter->count -= bvec->bv_len + offset;
1278 iter->iov_offset = offset & ~PAGE_MASK;
1279 }
1280 }
1281
1282 return 0;
1283}
1284
1285static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
1286 const struct sqe_submit *s, struct iovec **iovec,
1287 struct iov_iter *iter)
1288{
1289 const struct io_uring_sqe *sqe = s->sqe;
1290 void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1291 size_t sqe_len = READ_ONCE(sqe->len);
1292 u8 opcode;
1293
1294 /*
1295 * We're reading ->opcode for the second time, but the first read
1296 * doesn't care whether it's _FIXED or not, so it doesn't matter
1297 * whether ->opcode changes concurrently. The first read does care
1298 * about whether it is a READ or a WRITE, so we don't trust this read
1299 * for that purpose and instead let the caller pass in the read/write
1300 * flag.
1301 */
1302 opcode = READ_ONCE(sqe->opcode);
1303 if (opcode == IORING_OP_READ_FIXED ||
1304 opcode == IORING_OP_WRITE_FIXED) {
1305 ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
1306 *iovec = NULL;
1307 return ret;
1308 }
1309
1310 if (!s->has_user)
1311 return -EFAULT;
1312
1313#ifdef CONFIG_COMPAT
1314 if (ctx->compat)
1315 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
1316 iovec, iter);
1317#endif
1318
1319 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
1320}
1321
1322static inline bool io_should_merge(struct async_list *al, struct kiocb *kiocb)
1323{
1324 if (al->file == kiocb->ki_filp) {
1325 off_t start, end;
1326
1327 /*
1328 * Allow merging if we're anywhere in the range of the same
1329 * page. Generally this happens for sub-page reads or writes,
1330 * and it's beneficial to allow the first worker to bring the
1331 * page in and the piggy backed work can then work on the
1332 * cached page.
1333 */
1334 start = al->io_start & PAGE_MASK;
1335 end = (al->io_start + al->io_len + PAGE_SIZE - 1) & PAGE_MASK;
1336 if (kiocb->ki_pos >= start && kiocb->ki_pos <= end)
1337 return true;
1338 }
1339
1340 al->file = NULL;
1341 return false;
1342}
1343
1344/*
1345 * Make a note of the last file/offset/direction we punted to async
1346 * context. We'll use this information to see if we can piggy back a
1347 * sequential request onto the previous one, if it still hasn't been
1348 * completed by the async worker.
1349 */
1350static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
1351{
1352 struct async_list *async_list = &req->ctx->pending_async[rw];
1353 struct kiocb *kiocb = &req->rw;
1354 struct file *filp = kiocb->ki_filp;
1355
1356 if (io_should_merge(async_list, kiocb)) {
1357 unsigned long max_bytes;
1358
1359 /* Use 8x RA size as a decent limiter for both reads/writes */
1360 max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3);
1361 if (!max_bytes)
1362 max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);
1363
1364 /* If the max len is exceeded, reset the state */
1365 if (async_list->io_len + len <= max_bytes) {
1366 req->flags |= REQ_F_SEQ_PREV;
1367 async_list->io_len += len;
1368 } else {
1369 async_list->file = NULL;
1370 }
1371 }
1372
1373 /* New file? Reset state. */
1374 if (async_list->file != filp) {
1375 async_list->io_start = kiocb->ki_pos;
1376 async_list->io_len = len;
1377 async_list->file = filp;
1378 }
1379}
1380
1381/*
1382 * For files that don't have ->read_iter() and ->write_iter(), handle them
1383 * by looping over ->read() or ->write() manually.
1384 */
1385static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
1386 struct iov_iter *iter)
1387{
1388 ssize_t ret = 0;
1389
1390 /*
1391 * Don't support polled IO through this interface, and we can't
1392 * support non-blocking either. For the latter, this just causes
1393 * the kiocb to be handled from an async context.
1394 */
1395 if (kiocb->ki_flags & IOCB_HIPRI)
1396 return -EOPNOTSUPP;
1397 if (kiocb->ki_flags & IOCB_NOWAIT)
1398 return -EAGAIN;
1399
1400 while (iov_iter_count(iter)) {
1401 struct iovec iovec = iov_iter_iovec(iter);
1402 ssize_t nr;
1403
1404 if (rw == READ) {
1405 nr = file->f_op->read(file, iovec.iov_base,
1406 iovec.iov_len, &kiocb->ki_pos);
1407 } else {
1408 nr = file->f_op->write(file, iovec.iov_base,
1409 iovec.iov_len, &kiocb->ki_pos);
1410 }
1411
1412 if (nr < 0) {
1413 if (!ret)
1414 ret = nr;
1415 break;
1416 }
1417 ret += nr;
1418 if (nr != iovec.iov_len)
1419 break;
1420 iov_iter_advance(iter, nr);
1421 }
1422
1423 return ret;
1424}
1425
1426static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
1427 struct io_kiocb **nxt, bool force_nonblock)
1428{
1429 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1430 struct kiocb *kiocb = &req->rw;
1431 struct iov_iter iter;
1432 struct file *file;
1433 size_t iov_count;
1434 ssize_t read_size, ret;
1435
1436 ret = io_prep_rw(req, s, force_nonblock);
1437 if (ret)
1438 return ret;
1439 file = kiocb->ki_filp;
1440
1441 if (unlikely(!(file->f_mode & FMODE_READ)))
1442 return -EBADF;
1443
1444 ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
1445 if (ret < 0)
1446 return ret;
1447
1448 read_size = ret;
1449 if (req->flags & REQ_F_LINK)
1450 req->result = read_size;
1451
1452 iov_count = iov_iter_count(&iter);
1453 ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
1454 if (!ret) {
1455 ssize_t ret2;
1456
1457 if (file->f_op->read_iter)
1458 ret2 = call_read_iter(file, kiocb, &iter);
1459 else
1460 ret2 = loop_rw_iter(READ, file, kiocb, &iter);
1461
1462 /*
1463 * In case of a short read, punt to async. This can happen
1464 * if we have data partially cached. Alternatively we can
1465 * return the short read, in which case the application will
1466 * need to issue another SQE and wait for it. That SQE will
1467 * need async punt anyway, so it's more efficient to do it
1468 * here.
1469 */
1470 if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
1471 (req->flags & REQ_F_ISREG) &&
1472 ret2 > 0 && ret2 < read_size)
1473 ret2 = -EAGAIN;
1474 /* Catch -EAGAIN return for forced non-blocking submission */
1475 if (!force_nonblock || ret2 != -EAGAIN) {
1476 kiocb_done(kiocb, ret2, nxt, s->needs_lock);
1477 } else {
1478 /*
1479 * If ->needs_lock is true, we're already in async
1480 * context.
1481 */
1482 if (!s->needs_lock)
1483 io_async_list_note(READ, req, iov_count);
1484 ret = -EAGAIN;
1485 }
1486 }
1487 kfree(iovec);
1488 return ret;
1489}
1490
1491static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
1492 struct io_kiocb **nxt, bool force_nonblock)
1493{
1494 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1495 struct kiocb *kiocb = &req->rw;
1496 struct iov_iter iter;
1497 struct file *file;
1498 size_t iov_count;
1499 ssize_t ret;
1500
1501 ret = io_prep_rw(req, s, force_nonblock);
1502 if (ret)
1503 return ret;
1504
1505 file = kiocb->ki_filp;
1506 if (unlikely(!(file->f_mode & FMODE_WRITE)))
1507 return -EBADF;
1508
1509 ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
1510 if (ret < 0)
1511 return ret;
1512
1513 if (req->flags & REQ_F_LINK)
1514 req->result = ret;
1515
1516 iov_count = iov_iter_count(&iter);
1517
1518 ret = -EAGAIN;
1519 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) {
1520 /* If ->needs_lock is true, we're already in async context. */
1521 if (!s->needs_lock)
1522 io_async_list_note(WRITE, req, iov_count);
1523 goto out_free;
1524 }
1525
1526 ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
1527 if (!ret) {
1528 ssize_t ret2;
1529
1530 /*
1531 * Open-code file_start_write here to grab freeze protection,
1532 * which will be released by another thread in
1533 * io_complete_rw(). Fool lockdep by telling it the lock got
1534 * released so that it doesn't complain about the held lock when
1535 * we return to userspace.
1536 */
1537 if (req->flags & REQ_F_ISREG) {
1538 __sb_start_write(file_inode(file)->i_sb,
1539 SB_FREEZE_WRITE, true);
1540 __sb_writers_release(file_inode(file)->i_sb,
1541 SB_FREEZE_WRITE);
1542 }
1543 kiocb->ki_flags |= IOCB_WRITE;
1544
1545 if (file->f_op->write_iter)
1546 ret2 = call_write_iter(file, kiocb, &iter);
1547 else
1548 ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
1549 if (!force_nonblock || ret2 != -EAGAIN) {
1550 kiocb_done(kiocb, ret2, nxt, s->needs_lock);
1551 } else {
1552 /*
1553 * If ->needs_lock is true, we're already in async
1554 * context.
1555 */
1556 if (!s->needs_lock)
1557 io_async_list_note(WRITE, req, iov_count);
1558 ret = -EAGAIN;
1559 }
1560 }
1561out_free:
1562 kfree(iovec);
1563 return ret;
1564}
1565
1566/*
1567 * IORING_OP_NOP just posts a completion event, nothing else.
1568 */
1569static int io_nop(struct io_kiocb *req, u64 user_data)
1570{
1571 struct io_ring_ctx *ctx = req->ctx;
1572 long err = 0;
1573
1574 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1575 return -EINVAL;
1576
1577 io_cqring_add_event(ctx, user_data, err);
1578 io_put_req(req, NULL);
1579 return 0;
1580}
1581
1582static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1583{
1584 struct io_ring_ctx *ctx = req->ctx;
1585
1586 if (!req->file)
1587 return -EBADF;
1588
1589 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1590 return -EINVAL;
1591 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
1592 return -EINVAL;
1593
1594 return 0;
1595}
1596
1597static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1598 struct io_kiocb **nxt, bool force_nonblock)
1599{
1600 loff_t sqe_off = READ_ONCE(sqe->off);
1601 loff_t sqe_len = READ_ONCE(sqe->len);
1602 loff_t end = sqe_off + sqe_len;
1603 unsigned fsync_flags;
1604 int ret;
1605
1606 fsync_flags = READ_ONCE(sqe->fsync_flags);
1607 if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
1608 return -EINVAL;
1609
1610 ret = io_prep_fsync(req, sqe);
1611 if (ret)
1612 return ret;
1613
1614 /* fsync always requires a blocking context */
1615 if (force_nonblock)
1616 return -EAGAIN;
1617
1618 ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
1619 end > 0 ? end : LLONG_MAX,
1620 fsync_flags & IORING_FSYNC_DATASYNC);
1621
1622 if (ret < 0 && (req->flags & REQ_F_LINK))
1623 req->flags |= REQ_F_FAIL_LINK;
1624 io_cqring_add_event(req->ctx, sqe->user_data, ret);
1625 io_put_req(req, nxt);
1626 return 0;
1627}
1628
1629static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1630{
1631 struct io_ring_ctx *ctx = req->ctx;
1632 int ret = 0;
1633
1634 if (!req->file)
1635 return -EBADF;
1636
1637 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1638 return -EINVAL;
1639 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
1640 return -EINVAL;
1641
1642 return ret;
1643}
1644
1645static int io_sync_file_range(struct io_kiocb *req,
1646 const struct io_uring_sqe *sqe,
1647 struct io_kiocb **nxt,
1648 bool force_nonblock)
1649{
1650 loff_t sqe_off;
1651 loff_t sqe_len;
1652 unsigned flags;
1653 int ret;
1654
1655 ret = io_prep_sfr(req, sqe);
1656 if (ret)
1657 return ret;
1658
1659 /* sync_file_range always requires a blocking context */
1660 if (force_nonblock)
1661 return -EAGAIN;
1662
1663 sqe_off = READ_ONCE(sqe->off);
1664 sqe_len = READ_ONCE(sqe->len);
1665 flags = READ_ONCE(sqe->sync_range_flags);
1666
1667 ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);
1668
1669 if (ret < 0 && (req->flags & REQ_F_LINK))
1670 req->flags |= REQ_F_FAIL_LINK;
1671 io_cqring_add_event(req->ctx, sqe->user_data, ret);
1672 io_put_req(req, nxt);
1673 return 0;
1674}
1675
1676#if defined(CONFIG_NET)
1677static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1678 struct io_kiocb **nxt, bool force_nonblock,
1679 long (*fn)(struct socket *, struct user_msghdr __user *,
1680 unsigned int))
1681{
1682 struct socket *sock;
1683 int ret;
1684
1685 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1686 return -EINVAL;
1687
1688 sock = sock_from_file(req->file, &ret);
1689 if (sock) {
1690 struct user_msghdr __user *msg;
1691 unsigned flags;
1692
1693 flags = READ_ONCE(sqe->msg_flags);
1694 if (flags & MSG_DONTWAIT)
1695 req->flags |= REQ_F_NOWAIT;
1696 else if (force_nonblock)
1697 flags |= MSG_DONTWAIT;
1698
1699 msg = (struct user_msghdr __user *) (unsigned long)
1700 READ_ONCE(sqe->addr);
1701
1702 ret = fn(sock, msg, flags);
1703 if (force_nonblock && ret == -EAGAIN)
1704 return ret;
1705 }
1706
1707 io_cqring_add_event(req->ctx, sqe->user_data, ret);
1708 io_put_req(req, nxt);
1709 return 0;
1710}
1711#endif
1712
1713static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1714 struct io_kiocb **nxt, bool force_nonblock)
1715{
1716#if defined(CONFIG_NET)
1717 return io_send_recvmsg(req, sqe, nxt, force_nonblock,
1718 __sys_sendmsg_sock);
1719#else
1720 return -EOPNOTSUPP;
1721#endif
1722}
1723
1724static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1725 struct io_kiocb **nxt, bool force_nonblock)
1726{
1727#if defined(CONFIG_NET)
1728 return io_send_recvmsg(req, sqe, nxt, force_nonblock,
1729 __sys_recvmsg_sock);
1730#else
1731 return -EOPNOTSUPP;
1732#endif
1733}
1734
1735static void io_poll_remove_one(struct io_kiocb *req)
1736{
1737 struct io_poll_iocb *poll = &req->poll;
1738
1739 spin_lock(&poll->head->lock);
1740 WRITE_ONCE(poll->canceled, true);
1741 if (!list_empty(&poll->wait.entry)) {
1742 list_del_init(&poll->wait.entry);
1743 io_queue_async_work(req->ctx, req);
1744 }
1745 spin_unlock(&poll->head->lock);
1746
1747 list_del_init(&req->list);
1748}
1749
1750static void io_poll_remove_all(struct io_ring_ctx *ctx)
1751{
1752 struct io_kiocb *req;
1753
1754 spin_lock_irq(&ctx->completion_lock);
1755 while (!list_empty(&ctx->cancel_list)) {
1756 req = list_first_entry(&ctx->cancel_list, struct io_kiocb, list);
1757 io_poll_remove_one(req);
1758 }
1759 spin_unlock_irq(&ctx->completion_lock);
1760}
1761
1762/*
1763 * Find a running poll command that matches one specified in sqe->addr,
1764 * and remove it if found.
1765 */
1766static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1767{
1768 struct io_ring_ctx *ctx = req->ctx;
1769 struct io_kiocb *poll_req, *next;
1770 int ret = -ENOENT;
1771
1772 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1773 return -EINVAL;
1774 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
1775 sqe->poll_events)
1776 return -EINVAL;
1777
1778 spin_lock_irq(&ctx->completion_lock);
1779 list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) {
1780 if (READ_ONCE(sqe->addr) == poll_req->user_data) {
1781 io_poll_remove_one(poll_req);
1782 ret = 0;
1783 break;
1784 }
1785 }
1786 spin_unlock_irq(&ctx->completion_lock);
1787
1788 io_cqring_add_event(req->ctx, sqe->user_data, ret);
1789 io_put_req(req, NULL);
1790 return 0;
1791}
1792
1793static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
1794 __poll_t mask)
1795{
1796 req->poll.done = true;
1797 io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
1798 io_commit_cqring(ctx);
1799}
1800
1801static void io_poll_complete_work(struct work_struct *work)
1802{
1803 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1804 struct io_poll_iocb *poll = &req->poll;
1805 struct poll_table_struct pt = { ._key = poll->events };
1806 struct io_ring_ctx *ctx = req->ctx;
1807 __poll_t mask = 0;
1808
1809 if (!READ_ONCE(poll->canceled))
1810 mask = vfs_poll(poll->file, &pt) & poll->events;
1811
1812 /*
1813 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1814 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
1815 * synchronize with them. In the cancellation case the list_del_init
1816 * itself is not actually needed, but harmless so we keep it in to
1817 * avoid further branches in the fast path.
1818 */
1819 spin_lock_irq(&ctx->completion_lock);
1820 if (!mask && !READ_ONCE(poll->canceled)) {
1821 add_wait_queue(poll->head, &poll->wait);
1822 spin_unlock_irq(&ctx->completion_lock);
1823 return;
1824 }
1825 list_del_init(&req->list);
1826 io_poll_complete(ctx, req, mask);
1827 spin_unlock_irq(&ctx->completion_lock);
1828
8c838788 1829 io_cqring_ev_posted(ctx);
ba816ad6 1830 io_put_req(req, NULL);
221c5eb2
JA
1831}
1832
1833static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1834 void *key)
1835{
1836 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
1837 wait);
1838 struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
1839 struct io_ring_ctx *ctx = req->ctx;
1840 __poll_t mask = key_to_poll(key);
8c838788 1841 unsigned long flags;
221c5eb2
JA
1842
1843 /* for instances that support it, check for an event match first: */
8c838788
JA
1844 if (mask && !(mask & poll->events))
1845 return 0;
221c5eb2 1846
8c838788 1847 list_del_init(&poll->wait.entry);
221c5eb2 1848
8c838788
JA
1849 if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
1850 list_del(&req->list);
1851 io_poll_complete(ctx, req, mask);
1852 spin_unlock_irqrestore(&ctx->completion_lock, flags);
221c5eb2 1853
8c838788 1854 io_cqring_ev_posted(ctx);
ba816ad6 1855 io_put_req(req, NULL);
8c838788 1856 } else {
18d9be1a 1857 io_queue_async_work(ctx, req);
221c5eb2
JA
1858 }
1859
221c5eb2
JA
1860 return 1;
1861}
1862
1863struct io_poll_table {
1864 struct poll_table_struct pt;
1865 struct io_kiocb *req;
1866 int error;
1867};
1868
1869static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1870 struct poll_table_struct *p)
1871{
1872 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
1873
1874 if (unlikely(pt->req->poll.head)) {
1875 pt->error = -EINVAL;
1876 return;
1877 }
1878
1879 pt->error = 0;
1880 pt->req->poll.head = head;
1881 add_wait_queue(head, &pt->req->poll.wait);
1882}
1883
1884static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1885{
1886 struct io_poll_iocb *poll = &req->poll;
1887 struct io_ring_ctx *ctx = req->ctx;
1888 struct io_poll_table ipt;
8c838788 1889 bool cancel = false;
221c5eb2
JA
1890 __poll_t mask;
1891 u16 events;
221c5eb2
JA
1892
1893 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1894 return -EINVAL;
1895 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1896 return -EINVAL;
09bb8394
JA
1897 if (!poll->file)
1898 return -EBADF;
221c5eb2 1899
6cc47d1d 1900 req->submit.sqe = NULL;
221c5eb2
JA
1901 INIT_WORK(&req->work, io_poll_complete_work);
1902 events = READ_ONCE(sqe->poll_events);
1903 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
1904
221c5eb2 1905 poll->head = NULL;
8c838788 1906 poll->done = false;
221c5eb2
JA
1907 poll->canceled = false;
1908
1909 ipt.pt._qproc = io_poll_queue_proc;
1910 ipt.pt._key = poll->events;
1911 ipt.req = req;
1912 ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1913
1914 /* initialize the list so that we can do list_empty checks */
1915 INIT_LIST_HEAD(&poll->wait.entry);
1916 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
1917
36703247
JA
1918 INIT_LIST_HEAD(&req->list);
1919
221c5eb2 1920 mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
221c5eb2
JA
1921
1922 spin_lock_irq(&ctx->completion_lock);
8c838788
JA
1923 if (likely(poll->head)) {
1924 spin_lock(&poll->head->lock);
1925 if (unlikely(list_empty(&poll->wait.entry))) {
1926 if (ipt.error)
1927 cancel = true;
1928 ipt.error = 0;
1929 mask = 0;
1930 }
1931 if (mask || ipt.error)
1932 list_del_init(&poll->wait.entry);
1933 else if (cancel)
1934 WRITE_ONCE(poll->canceled, true);
1935 else if (!poll->done) /* actually waiting for an event */
1936 list_add_tail(&req->list, &ctx->cancel_list);
1937 spin_unlock(&poll->head->lock);
1938 }
1939 if (mask) { /* no async needed, we stole it */
221c5eb2 1940 ipt.error = 0;
8c838788 1941 io_poll_complete(ctx, req, mask);
221c5eb2 1942 }
221c5eb2
JA
1943 spin_unlock_irq(&ctx->completion_lock);
1944
8c838788
JA
1945 if (mask) {
1946 io_cqring_ev_posted(ctx);
ba816ad6 1947 io_put_req(req, NULL);
221c5eb2 1948 }
8c838788 1949 return ipt.error;
221c5eb2
JA
1950}
1951
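/*
 * Userspace view (illustrative sketch, not part of this file): filling
 * SQEs for the poll commands handled above. Assumes the 5.x io_uring
 * UAPI in <linux/io_uring.h>; io_poll_add() only consumes fd,
 * poll_events and user_data, and io_poll_remove() only consumes addr,
 * which must hold the user_data of the poll request to cancel.
 */
#include <string.h>
#include <poll.h>
#include <linux/io_uring.h>

static void prep_poll_add(struct io_uring_sqe *sqe, int fd, __u64 tag)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_ADD;
	sqe->fd = fd;
	sqe->poll_events = POLLIN;	/* classic poll(2) event mask */
	sqe->user_data = tag;		/* echoed back in the CQE */
}

static void prep_poll_remove(struct io_uring_sqe *sqe, __u64 tag)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_REMOVE;
	sqe->addr = tag;		/* user_data of the poll to cancel */
	sqe->user_data = tag + 1;
}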
5262f567
JA
1952static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
1953{
1954 struct io_ring_ctx *ctx;
ef03681a 1955 struct io_kiocb *req, *prev;
5262f567
JA
1956 unsigned long flags;
1957
1958 req = container_of(timer, struct io_kiocb, timeout.timer);
1959 ctx = req->ctx;
1960 atomic_inc(&ctx->cq_timeouts);
1961
1962 spin_lock_irqsave(&ctx->completion_lock, flags);
ef03681a 1963 /*
1964 * Adjust the sequence of reqs before the current one, because this
1965 * timeout will consume a slot in the cq_ring and advance the cq_tail
1966 * pointer; otherwise other timeout reqs may complete early, without
1967 * waiting for enough wait_nr completions.
1968 */
1969 prev = req;
1970 list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
1971 prev->sequence++;
5262f567
JA
1972 list_del(&req->list);
1973
1974 io_cqring_fill_event(ctx, req->user_data, -ETIME);
1975 io_commit_cqring(ctx);
1976 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1977
1978 io_cqring_ev_posted(ctx);
1979
ba816ad6 1980 io_put_req(req, NULL);
5262f567
JA
1981 return HRTIMER_NORESTART;
1982}
1983
1984static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1985{
5da0fb1a 1986 unsigned count;
5262f567
JA
1987 struct io_ring_ctx *ctx = req->ctx;
1988 struct list_head *entry;
bdf20073 1989 struct timespec64 ts;
a1f58ba4 1990 unsigned span = 0;
5262f567
JA
1991
1992 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1993 return -EINVAL;
1994 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
1995 sqe->len != 1)
1996 return -EINVAL;
bdf20073
AB
1997
1998 if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
1999 return -EFAULT;
2000
2001 /*
2002 * sqe->off holds how many events need to occur for this
2003 * timeout event to be satisfied.
2004 */
2005 count = READ_ONCE(sqe->off);
2006 if (!count)
2007 count = 1;
2008
2009 req->sequence = ctx->cached_sq_head + count - 1;
5da0fb1a 2010 /* reuse it to store the count */
2011 req->submit.sequence = count;
5262f567
JA
2012 req->flags |= REQ_F_TIMEOUT;
2013
2014 /*
2015 * Insertion sort, ensuring the first entry in the list is always
2016 * the one we need first.
2017 */
5262f567
JA
2018 spin_lock_irq(&ctx->completion_lock);
2019 list_for_each_prev(entry, &ctx->timeout_list) {
2020 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
5da0fb1a 2021 unsigned nxt_sq_head;
2022 long long tmp, tmp_nxt;
5262f567 2023
5da0fb1a 2024 /*
2025 * Since cached_sq_head + count - 1 can overflow, use type long
2026 * long to store it.
2027 */
2028 tmp = (long long)ctx->cached_sq_head + count - 1;
2029 nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
2030 tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
2031
2032 /*
2033 * cached_sq_head may overflow, but it can never wrap around
2034 * twice while some timeout req is still valid.
2035 */
2036 if (ctx->cached_sq_head < nxt_sq_head)
8b07a65a 2037 tmp += UINT_MAX;
5da0fb1a 2038
a1f58ba4 2039 if (tmp > tmp_nxt)
5262f567 2040 break;
a1f58ba4 2041
2042 /*
2043 * The sequences of the inserted req and all reqs after it must
2044 * be adjusted, because each timeout req consumes a slot.
2045 */
2046 span++;
2047 nxt->sequence++;
5262f567 2048 }
a1f58ba4 2049 req->sequence -= span;
5262f567
JA
2050 list_add(&req->list, entry);
2051 spin_unlock_irq(&ctx->completion_lock);
2052
2053 hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2054 req->timeout.timer.function = io_timeout_fn;
bdf20073 2055 hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
5262f567
JA
2056 HRTIMER_MODE_REL);
2057 return 0;
2058}
2059
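/*
 * Userspace view (illustrative sketch, not part of this file): arming
 * IORING_OP_TIMEOUT as validated above. sqe->addr points at a 64-bit
 * timespec, sqe->len must be 1, and sqe->off is how many completions
 * satisfy the timeout (the kernel treats 0 as 1). Assumes
 * struct __kernel_timespec from <linux/time_types.h>.
 */
#include <string.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

static void prep_timeout(struct io_uring_sqe *sqe,
			 struct __kernel_timespec *ts, unsigned count)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_TIMEOUT;
	sqe->addr = (unsigned long)ts;	/* relative timeout */
	sqe->len = 1;			/* exactly one timespec */
	sqe->off = count;		/* completions to wait for */
	sqe->user_data = 0x7431;	/* CQE carries -ETIME if the timer fires */
}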
de0617e4
JA
2060static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
2061 const struct io_uring_sqe *sqe)
2062{
2063 struct io_uring_sqe *sqe_copy;
2064
2065 if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
2066 return 0;
2067
2068 sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
2069 if (!sqe_copy)
2070 return -EAGAIN;
2071
2072 spin_lock_irq(&ctx->completion_lock);
2073 if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
2074 spin_unlock_irq(&ctx->completion_lock);
2075 kfree(sqe_copy);
2076 return 0;
2077 }
2078
2079 memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
2080 req->submit.sqe = sqe_copy;
2081
2082 INIT_WORK(&req->work, io_sq_wq_submit_work);
2083 list_add_tail(&req->list, &ctx->defer_list);
2084 spin_unlock_irq(&ctx->completion_lock);
2085 return -EIOCBQUEUED;
2086}
2087
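/*
 * Userspace view (illustrative sketch, not part of this file):
 * IOSQE_IO_DRAIN is what feeds the defer machinery above. A drained
 * SQE is not issued until every previously submitted SQE completes,
 * e.g. an fsync barrier behind a batch of writes:
 */
#include <string.h>
#include <linux/io_uring.h>

static void prep_fsync_barrier(struct io_uring_sqe *sqe, int fd)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FSYNC;
	sqe->fd = fd;
	sqe->flags = IOSQE_IO_DRAIN;	/* defer until prior SQEs finish */
	sqe->user_data = 0xf5;
}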
2b188cc1 2088static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
ba816ad6
JA
2089 const struct sqe_submit *s, struct io_kiocb **nxt,
2090 bool force_nonblock)
2b188cc1 2091{
e0c5c576 2092 int ret, opcode;
2b188cc1 2093
9e645e11
JA
2094 req->user_data = READ_ONCE(s->sqe->user_data);
2095
2b188cc1
JA
2096 if (unlikely(s->index >= ctx->sq_entries))
2097 return -EINVAL;
2b188cc1
JA
2098
2099 opcode = READ_ONCE(s->sqe->opcode);
2100 switch (opcode) {
2101 case IORING_OP_NOP:
2102 ret = io_nop(req, req->user_data);
2103 break;
2104 case IORING_OP_READV:
edafccee
JA
2105 if (unlikely(s->sqe->buf_index))
2106 return -EINVAL;
ba816ad6 2107 ret = io_read(req, s, nxt, force_nonblock);
2b188cc1
JA
2108 break;
2109 case IORING_OP_WRITEV:
edafccee
JA
2110 if (unlikely(s->sqe->buf_index))
2111 return -EINVAL;
ba816ad6 2112 ret = io_write(req, s, nxt, force_nonblock);
edafccee
JA
2113 break;
2114 case IORING_OP_READ_FIXED:
ba816ad6 2115 ret = io_read(req, s, nxt, force_nonblock);
edafccee
JA
2116 break;
2117 case IORING_OP_WRITE_FIXED:
ba816ad6 2118 ret = io_write(req, s, nxt, force_nonblock);
2b188cc1 2119 break;
c992fe29 2120 case IORING_OP_FSYNC:
ba816ad6 2121 ret = io_fsync(req, s->sqe, nxt, force_nonblock);
c992fe29 2122 break;
221c5eb2
JA
2123 case IORING_OP_POLL_ADD:
2124 ret = io_poll_add(req, s->sqe);
2125 break;
2126 case IORING_OP_POLL_REMOVE:
2127 ret = io_poll_remove(req, s->sqe);
2128 break;
5d17b4a4 2129 case IORING_OP_SYNC_FILE_RANGE:
ba816ad6 2130 ret = io_sync_file_range(req, s->sqe, nxt, force_nonblock);
5d17b4a4 2131 break;
0fa03c62 2132 case IORING_OP_SENDMSG:
ba816ad6 2133 ret = io_sendmsg(req, s->sqe, nxt, force_nonblock);
0fa03c62 2134 break;
aa1fa28f 2135 case IORING_OP_RECVMSG:
ba816ad6 2136 ret = io_recvmsg(req, s->sqe, nxt, force_nonblock);
aa1fa28f 2137 break;
5262f567
JA
2138 case IORING_OP_TIMEOUT:
2139 ret = io_timeout(req, s->sqe);
2140 break;
2b188cc1
JA
2141 default:
2142 ret = -EINVAL;
2143 break;
2144 }
2145
def596e9
JA
2146 if (ret)
2147 return ret;
2148
2149 if (ctx->flags & IORING_SETUP_IOPOLL) {
9e645e11 2150 if (req->result == -EAGAIN)
def596e9
JA
2151 return -EAGAIN;
2152
2153 /* workqueue context doesn't hold uring_lock, grab it now */
2154 if (s->needs_lock)
2155 mutex_lock(&ctx->uring_lock);
2156 io_iopoll_req_issued(req);
2157 if (s->needs_lock)
2158 mutex_unlock(&ctx->uring_lock);
2159 }
2160
2161 return 0;
2b188cc1
JA
2162}
2163
31b51510
JA
2164static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
2165 const struct io_uring_sqe *sqe)
2166{
2167 switch (sqe->opcode) {
2168 case IORING_OP_READV:
2169 case IORING_OP_READ_FIXED:
2170 return &ctx->pending_async[READ];
2171 case IORING_OP_WRITEV:
2172 case IORING_OP_WRITE_FIXED:
2173 return &ctx->pending_async[WRITE];
2174 default:
2175 return NULL;
2176 }
2177}
2178
edafccee
JA
2179static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
2180{
2181 u8 opcode = READ_ONCE(sqe->opcode);
2182
2183 return !(opcode == IORING_OP_READ_FIXED ||
2184 opcode == IORING_OP_WRITE_FIXED);
2185}
2186
2b188cc1
JA
2187static void io_sq_wq_submit_work(struct work_struct *work)
2188{
2189 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2b188cc1 2190 struct io_ring_ctx *ctx = req->ctx;
31b51510
JA
2191 struct mm_struct *cur_mm = NULL;
2192 struct async_list *async_list;
2193 LIST_HEAD(req_list);
edafccee 2194 mm_segment_t old_fs;
2b188cc1
JA
2195 int ret;
2196
31b51510
JA
2197 async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
2198restart:
2199 do {
2200 struct sqe_submit *s = &req->submit;
2201 const struct io_uring_sqe *sqe = s->sqe;
d0ee8791 2202 unsigned int flags = req->flags;
ba816ad6 2203 struct io_kiocb *nxt = NULL;
2b188cc1 2204
8449eeda 2205 /* Ensure we clear previously set non-block flag */
31b51510
JA
2206 req->rw.ki_flags &= ~IOCB_NOWAIT;
2207
2208 ret = 0;
2209 if (io_sqe_needs_user(sqe) && !cur_mm) {
2210 if (!mmget_not_zero(ctx->sqo_mm)) {
2211 ret = -EFAULT;
2212 } else {
2213 cur_mm = ctx->sqo_mm;
2214 use_mm(cur_mm);
2215 old_fs = get_fs();
2216 set_fs(USER_DS);
2217 }
2218 }
2219
2220 if (!ret) {
2221 s->has_user = cur_mm != NULL;
2222 s->needs_lock = true;
2223 do {
ba816ad6 2224 ret = __io_submit_sqe(ctx, req, s, &nxt, false);
31b51510
JA
2225 /*
2226 * We can get EAGAIN for polled IO even though
2227 * we're forcing a sync submission from here,
2228 * since we can't wait for request slots on the
2229 * block side.
2230 */
2231 if (ret != -EAGAIN)
2232 break;
2233 cond_resched();
2234 } while (1);
2235 }
817869d2
JA
2236
2237 /* drop submission reference */
ba816ad6 2238 io_put_req(req, NULL);
817869d2 2239
31b51510 2240 if (ret) {
c71ffb67 2241 io_cqring_add_event(ctx, sqe->user_data, ret);
ba816ad6 2242 io_put_req(req, NULL);
31b51510
JA
2243 }
2244
2245 /* async context always use a copy of the sqe */
2246 kfree(sqe);
2247
ba816ad6
JA
2248 /* if a dependent link is ready, do that as the next one */
2249 if (!ret && nxt) {
2250 req = nxt;
2251 continue;
2252 }
2253
f7b76ac9 2254 /* reqs from the defer and link lists needn't decrease the async cnt */
d0ee8791 2255 if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
f7b76ac9
ZL
2256 goto out;
2257
31b51510
JA
2258 if (!async_list)
2259 break;
2260 if (!list_empty(&req_list)) {
2261 req = list_first_entry(&req_list, struct io_kiocb,
2262 list);
2263 list_del(&req->list);
2264 continue;
2265 }
2266 if (list_empty(&async_list->list))
2267 break;
2268
2269 req = NULL;
2270 spin_lock(&async_list->lock);
2271 if (list_empty(&async_list->list)) {
2272 spin_unlock(&async_list->lock);
2273 break;
2274 }
2275 list_splice_init(&async_list->list, &req_list);
2276 spin_unlock(&async_list->lock);
2277
2278 req = list_first_entry(&req_list, struct io_kiocb, list);
2279 list_del(&req->list);
2280 } while (req);
edafccee
JA
2281
2282 /*
31b51510
JA
2283 * Rare case of racing with a submitter. If we find the count has
2284 * dropped to zero AND we have pending work items, then restart
2285 * the processing. This is a tiny race window.
edafccee 2286 */
31b51510
JA
2287 if (async_list) {
2288 ret = atomic_dec_return(&async_list->cnt);
2289 while (!ret && !list_empty(&async_list->list)) {
2290 spin_lock(&async_list->lock);
2291 atomic_inc(&async_list->cnt);
2292 list_splice_init(&async_list->list, &req_list);
2293 spin_unlock(&async_list->lock);
2294
2295 if (!list_empty(&req_list)) {
2296 req = list_first_entry(&req_list,
2297 struct io_kiocb, list);
2298 list_del(&req->list);
2299 goto restart;
2300 }
2301 ret = atomic_dec_return(&async_list->cnt);
edafccee 2302 }
edafccee 2303 }
2b188cc1 2304
f7b76ac9 2305out:
31b51510 2306 if (cur_mm) {
edafccee 2307 set_fs(old_fs);
31b51510
JA
2308 unuse_mm(cur_mm);
2309 mmput(cur_mm);
2b188cc1 2310 }
31b51510 2311}
2b188cc1 2312
31b51510
JA
2313/*
2314 * See if we can piggyback onto previously submitted work that is still
2315 * running. We currently only allow this if the new request is sequential
2316 * to the previous one we punted.
2317 */
2318static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
2319{
6d5d5ac5 2320 bool ret;
31b51510
JA
2321
2322 if (!list)
2323 return false;
2324 if (!(req->flags & REQ_F_SEQ_PREV))
2325 return false;
2326 if (!atomic_read(&list->cnt))
2327 return false;
2328
2329 ret = true;
2330 spin_lock(&list->lock);
2331 list_add_tail(&req->list, &list->list);
c0e48f9d
ZL
2332 /*
2333 * Ensure we see a simultaneous modification from io_sq_wq_submit_work()
2334 */
2335 smp_mb();
31b51510
JA
2336 if (!atomic_read(&list->cnt)) {
2337 list_del_init(&req->list);
2338 ret = false;
2339 }
2340 spin_unlock(&list->lock);
2341 return ret;
2b188cc1
JA
2342}
2343
09bb8394
JA
2344static bool io_op_needs_file(const struct io_uring_sqe *sqe)
2345{
2346 int op = READ_ONCE(sqe->opcode);
2347
2348 switch (op) {
2349 case IORING_OP_NOP:
2350 case IORING_OP_POLL_REMOVE:
2351 return false;
2352 default:
2353 return true;
2354 }
2355}
2356
2357static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
2358 struct io_submit_state *state, struct io_kiocb *req)
2359{
2360 unsigned flags;
2361 int fd;
2362
2363 flags = READ_ONCE(s->sqe->flags);
2364 fd = READ_ONCE(s->sqe->fd);
2365
4fe2c963 2366 if (flags & IOSQE_IO_DRAIN)
de0617e4 2367 req->flags |= REQ_F_IO_DRAIN;
4fe2c963
JL
2368 /*
2369 * All IO needs to record the previous position; when LINK meets
2370 * DRAIN, it is used to mark the position of the first IO in the
2371 * link list.
2372 */
2373 req->sequence = s->sequence;
de0617e4 2374
60c112b0 2375 if (!io_op_needs_file(s->sqe))
09bb8394 2376 return 0;
09bb8394
JA
2377
2378 if (flags & IOSQE_FIXED_FILE) {
2379 if (unlikely(!ctx->user_files ||
2380 (unsigned) fd >= ctx->nr_user_files))
2381 return -EBADF;
08a45173
JA
2382 if (!ctx->user_files[fd])
2383 return -EBADF;
09bb8394
JA
2384 req->file = ctx->user_files[fd];
2385 req->flags |= REQ_F_FIXED_FILE;
2386 } else {
2387 if (s->needs_fixed_file)
2388 return -EBADF;
2389 req->file = io_file_get(state, fd);
2390 if (unlikely(!req->file))
2391 return -EBADF;
2392 }
2393
2394 return 0;
2395}
2396
4fe2c963 2397static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
bc808bce 2398 struct sqe_submit *s)
2b188cc1 2399{
e0c5c576 2400 int ret;
2b188cc1 2401
ba816ad6 2402 ret = __io_submit_sqe(ctx, req, s, NULL, true);
491381ce
JA
2403
2404 /*
2405 * We async punt it if the file wasn't marked NOWAIT, or if the file
2406 * doesn't support non-blocking read/write attempts
2407 */
2408 if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
2409 (req->flags & REQ_F_MUST_PUNT))) {
2b188cc1
JA
2410 struct io_uring_sqe *sqe_copy;
2411
954dab19 2412 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2b188cc1 2413 if (sqe_copy) {
31b51510
JA
2414 struct async_list *list;
2415
2b188cc1 2416 s->sqe = sqe_copy;
2b188cc1 2417 memcpy(&req->submit, s, sizeof(*s));
31b51510
JA
2418 list = io_async_list_from_sqe(ctx, s->sqe);
2419 if (!io_add_to_prev_work(list, req)) {
2420 if (list)
2421 atomic_inc(&list->cnt);
2422 INIT_WORK(&req->work, io_sq_wq_submit_work);
18d9be1a 2423 io_queue_async_work(ctx, req);
31b51510 2424 }
e65ef56d
JA
2425
2426 /*
2427 * Queued up for async execution, worker will release
9e645e11 2428 * submit reference when the iocb is actually submitted.
e65ef56d
JA
2429 */
2430 return 0;
2b188cc1
JA
2431 }
2432 }
e65ef56d
JA
2433
2434 /* drop submission reference */
ba816ad6 2435 io_put_req(req, NULL);
e65ef56d
JA
2436
2437 /* and drop final reference, if we failed */
9e645e11
JA
2438 if (ret) {
2439 io_cqring_add_event(ctx, req->user_data, ret);
2440 if (req->flags & REQ_F_LINK)
2441 req->flags |= REQ_F_FAIL_LINK;
ba816ad6 2442 io_put_req(req, NULL);
9e645e11 2443 }
2b188cc1
JA
2444
2445 return ret;
2446}
2447
4fe2c963 2448static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
bc808bce 2449 struct sqe_submit *s)
4fe2c963
JL
2450{
2451 int ret;
2452
2453 ret = io_req_defer(ctx, req, s->sqe);
2454 if (ret) {
2455 if (ret != -EIOCBQUEUED) {
ba816ad6 2456 io_free_req(req, NULL);
4fe2c963
JL
2457 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2458 }
2459 return 0;
2460 }
2461
bc808bce 2462 return __io_queue_sqe(ctx, req, s);
4fe2c963
JL
2463}
2464
2465static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
bc808bce 2466 struct sqe_submit *s, struct io_kiocb *shadow)
4fe2c963
JL
2467{
2468 int ret;
2469 int need_submit = false;
2470
2471 if (!shadow)
bc808bce 2472 return io_queue_sqe(ctx, req, s);
4fe2c963
JL
2473
2474 /*
2475 * Mark the first IO in the link list as DRAIN so that all the
2476 * following IOs enter the defer list; all prior IO must complete
2477 * before the link list runs.
2478 */
2479 req->flags |= REQ_F_IO_DRAIN;
2480 ret = io_req_defer(ctx, req, s->sqe);
2481 if (ret) {
2482 if (ret != -EIOCBQUEUED) {
ba816ad6 2483 io_free_req(req, NULL);
7b20238d 2484 __io_free_req(shadow);
4fe2c963
JL
2485 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2486 return 0;
2487 }
2488 } else {
2489 /*
2490 * ret == 0 means that all IOs in front of the link IO have
2491 * completed, so let's queue the link head.
2492 */
2493 need_submit = true;
2494 }
2495
2496 /* Insert shadow req to defer_list, blocking next IOs */
2497 spin_lock_irq(&ctx->completion_lock);
2498 list_add_tail(&shadow->list, &ctx->defer_list);
2499 spin_unlock_irq(&ctx->completion_lock);
2500
2501 if (need_submit)
bc808bce 2502 return __io_queue_sqe(ctx, req, s);
4fe2c963
JL
2503
2504 return 0;
2505}
2506
9e645e11
JA
2507#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
2508
2509static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
bc808bce 2510 struct io_submit_state *state, struct io_kiocb **link)
9e645e11
JA
2511{
2512 struct io_uring_sqe *sqe_copy;
2513 struct io_kiocb *req;
2514 int ret;
2515
2516 /* enforce forwards compatibility on users */
2517 if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
2518 ret = -EINVAL;
2519 goto err;
2520 }
2521
2522 req = io_get_req(ctx, state);
2523 if (unlikely(!req)) {
2524 ret = -EAGAIN;
2525 goto err;
2526 }
2527
2528 ret = io_req_set_file(ctx, s, state, req);
2529 if (unlikely(ret)) {
2530err_req:
ba816ad6 2531 io_free_req(req, NULL);
9e645e11
JA
2532err:
2533 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2534 return;
2535 }
2536
84d55dc5
PB
2537 req->user_data = s->sqe->user_data;
2538
9e645e11
JA
2539 /*
2540 * If we already have a head request, queue this one for async
2541 * submittal once the head completes. If we don't have a head but
2542 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2543 * submitted sync once the chain is complete. If none of those
2544 * conditions are true (normal request), then just queue it.
2545 */
2546 if (*link) {
2547 struct io_kiocb *prev = *link;
2548
2549 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2550 if (!sqe_copy) {
2551 ret = -EAGAIN;
2552 goto err_req;
2553 }
2554
2555 s->sqe = sqe_copy;
2556 memcpy(&req->submit, s, sizeof(*s));
2557 list_add_tail(&req->list, &prev->link_list);
2558 } else if (s->sqe->flags & IOSQE_IO_LINK) {
2559 req->flags |= REQ_F_LINK;
2560
2561 memcpy(&req->submit, s, sizeof(*s));
2562 INIT_LIST_HEAD(&req->link_list);
2563 *link = req;
2564 } else {
bc808bce 2565 io_queue_sqe(ctx, req, s);
9e645e11
JA
2566 }
2567}
2568
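/*
 * Userspace view (illustrative sketch, not part of this file): the
 * chain-building logic above is driven by IOSQE_IO_LINK. An SQE with
 * the flag set makes the following SQE wait for it; the first SQE
 * without the flag ends the chain. The two SQE slots below are
 * assumed to be consecutive and already zeroed.
 */
#include <sys/uio.h>
#include <linux/io_uring.h>

static void prep_read_then_fsync(struct io_uring_sqe *sqes, int fd,
				 struct iovec *iov)
{
	sqes[0].opcode = IORING_OP_READV;
	sqes[0].fd = fd;
	sqes[0].addr = (unsigned long)iov;
	sqes[0].len = 1;			/* one iovec */
	sqes[0].flags = IOSQE_IO_LINK;		/* sqes[1] depends on this */
	sqes[0].user_data = 1;

	sqes[1].opcode = IORING_OP_FSYNC;
	sqes[1].fd = fd;
	sqes[1].user_data = 2;			/* runs after the read */
}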
9a56a232
JA
2569/*
2570 * Batched submission is done, ensure local IO is flushed out.
2571 */
2572static void io_submit_state_end(struct io_submit_state *state)
2573{
2574 blk_finish_plug(&state->plug);
3d6770fb 2575 io_file_put(state);
2579f913
JA
2576 if (state->free_reqs)
2577 kmem_cache_free_bulk(req_cachep, state->free_reqs,
2578 &state->reqs[state->cur_req]);
9a56a232
JA
2579}
2580
2581/*
2582 * Start submission side cache.
2583 */
2584static void io_submit_state_start(struct io_submit_state *state,
2585 struct io_ring_ctx *ctx, unsigned max_ios)
2586{
2587 blk_start_plug(&state->plug);
2579f913 2588 state->free_reqs = 0;
9a56a232
JA
2589 state->file = NULL;
2590 state->ios_left = max_ios;
2591}
2592
2b188cc1
JA
2593static void io_commit_sqring(struct io_ring_ctx *ctx)
2594{
75b28aff 2595 struct io_rings *rings = ctx->rings;
2b188cc1 2596
75b28aff 2597 if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
2b188cc1
JA
2598 /*
2599 * Ensure any loads from the SQEs are done at this point,
2600 * since once we write the new head, the application could
2601 * write new data to them.
2602 */
75b28aff 2603 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
2604 }
2605}
2606
2b188cc1
JA
2607/*
2608 * Fetch an sqe, if one is available. Note that s->sqe will point to memory
2609 * that is mapped by userspace. This means that care needs to be taken to
2610 * ensure that reads are stable, as we cannot rely on userspace always
2611 * being a good citizen. If members of the sqe are validated and then later
2612 * used, it's important that those reads are done through READ_ONCE() to
2613 * prevent a re-load down the line.
2614 */
2615static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
2616{
75b28aff
HV
2617 struct io_rings *rings = ctx->rings;
2618 u32 *sq_array = ctx->sq_array;
2b188cc1
JA
2619 unsigned head;
2620
2621 /*
2622 * The cached sq head (or cq tail) serves two purposes:
2623 *
2624 * 1) allows us to batch the cost of updating the user visible
2625 * head updates.
2626 * 2) allows the kernel side to track the head on its own, even
2627 * though the application is the one updating it.
2628 */
2629 head = ctx->cached_sq_head;
e523a29c 2630 /* make sure SQ entry isn't read before tail */
75b28aff 2631 if (head == smp_load_acquire(&rings->sq.tail))
2b188cc1
JA
2632 return false;
2633
75b28aff 2634 head = READ_ONCE(sq_array[head & ctx->sq_mask]);
2b188cc1
JA
2635 if (head < ctx->sq_entries) {
2636 s->index = head;
2637 s->sqe = &ctx->sq_sqes[head];
8776f3fa 2638 s->sequence = ctx->cached_sq_head;
2b188cc1
JA
2639 ctx->cached_sq_head++;
2640 return true;
2641 }
2642
2643 /* drop invalid entries */
2644 ctx->cached_sq_head++;
498ccd9e
JA
2645 ctx->cached_sq_dropped++;
2646 WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
2b188cc1
JA
2647 return false;
2648}
2649
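/*
 * Userspace counterpart (illustrative sketch, not part of this file):
 * producing SQ entries. The smp_load_acquire of sq.tail above pairs
 * with a release store from the application, so the SQE and array
 * writes are visible before the new tail is. The pointers are into
 * the SQ ring mmap, located via io_sqring_offsets.
 */
#include <stdatomic.h>
#include <linux/io_uring.h>

struct sq_view {
	_Atomic unsigned *tail;		/* sq mmap + sq_off.tail */
	unsigned *array;		/* sq mmap + sq_off.array */
	unsigned ring_mask;		/* *(sq mmap + sq_off.ring_mask) */
	struct io_uring_sqe *sqes;	/* the IORING_OFF_SQES mmap */
};

static void sq_push(struct sq_view *sq, const struct io_uring_sqe *sqe)
{
	unsigned tail = atomic_load_explicit(sq->tail, memory_order_relaxed);
	unsigned idx = tail & sq->ring_mask;

	sq->sqes[idx] = *sqe;		/* fill the slot first */
	sq->array[idx] = idx;		/* then publish its index */
	/* pairs with the kernel's smp_load_acquire of sq.tail */
	atomic_store_explicit(sq->tail, tail + 1, memory_order_release);
}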
fb5ccc98
PB
2650static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
2651 bool has_user, bool mm_fault)
6c271ce2
JA
2652{
2653 struct io_submit_state state, *statep = NULL;
9e645e11 2654 struct io_kiocb *link = NULL;
4fe2c963 2655 struct io_kiocb *shadow_req = NULL;
9e645e11
JA
2656 bool prev_was_link = false;
2657 int i, submitted = 0;
6c271ce2
JA
2658
2659 if (nr > IO_PLUG_THRESHOLD) {
2660 io_submit_state_start(&state, ctx, nr);
2661 statep = &state;
2662 }
2663
2664 for (i = 0; i < nr; i++) {
fb5ccc98
PB
2665 struct sqe_submit s;
2666
2667 if (!io_get_sqring(ctx, &s))
2668 break;
2669
9e645e11
JA
2670 /*
2671 * If previous wasn't linked and we have a linked command,
2672 * that's the end of the chain. Submit the previous link.
2673 */
2674 if (!prev_was_link && link) {
bc808bce 2675 io_queue_link_head(ctx, link, &link->submit, shadow_req);
9e645e11 2676 link = NULL;
5f5ad9ce 2677 shadow_req = NULL;
9e645e11 2678 }
fb5ccc98 2679 prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
9e645e11 2680
fb5ccc98 2681 if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
4fe2c963
JL
2682 if (!shadow_req) {
2683 shadow_req = io_get_req(ctx, NULL);
a1041c27
JL
2684 if (unlikely(!shadow_req))
2685 goto out;
4fe2c963
JL
2686 shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
2687 refcount_dec(&shadow_req->refs);
2688 }
fb5ccc98 2689 shadow_req->sequence = s.sequence;
4fe2c963
JL
2690 }
2691
a1041c27 2692out:
6c271ce2 2693 if (unlikely(mm_fault)) {
fb5ccc98 2694 io_cqring_add_event(ctx, s.sqe->user_data,
9e645e11 2695 -EFAULT);
6c271ce2 2696 } else {
fb5ccc98
PB
2697 s.has_user = has_user;
2698 s.needs_lock = true;
2699 s.needs_fixed_file = true;
2700 io_submit_sqe(ctx, &s, statep, &link);
6c271ce2 2701 submitted++;
6c271ce2 2702 }
6c271ce2
JA
2703 }
2704
9e645e11 2705 if (link)
bc808bce 2706 io_queue_link_head(ctx, link, &link->submit, shadow_req);
6c271ce2
JA
2707 if (statep)
2708 io_submit_state_end(&state);
2709
2710 return submitted;
2711}
2712
2713static int io_sq_thread(void *data)
2714{
6c271ce2
JA
2715 struct io_ring_ctx *ctx = data;
2716 struct mm_struct *cur_mm = NULL;
2717 mm_segment_t old_fs;
2718 DEFINE_WAIT(wait);
2719 unsigned inflight;
2720 unsigned long timeout;
2721
a4c0b3de
JL
2722 complete(&ctx->sqo_thread_started);
2723
6c271ce2
JA
2724 old_fs = get_fs();
2725 set_fs(USER_DS);
2726
2727 timeout = inflight = 0;
2bbcd6d3 2728 while (!kthread_should_park()) {
fb5ccc98
PB
2729 bool mm_fault = false;
2730 unsigned int to_submit;
6c271ce2
JA
2731
2732 if (inflight) {
2733 unsigned nr_events = 0;
2734
2735 if (ctx->flags & IORING_SETUP_IOPOLL) {
2b2ed975
JA
2736 /*
2737 * inflight is the count of the maximum possible
2738 * entries we submitted, but it can be smaller
2739 * if we dropped some of them. If we don't have
2740 * poll entries available, then we know that we
2741 * have nothing left to poll for. Reset the
2742 * inflight count to zero in that case.
2743 */
2744 mutex_lock(&ctx->uring_lock);
2745 if (!list_empty(&ctx->poll_list))
2746 __io_iopoll_check(ctx, &nr_events, 0);
2747 else
2748 inflight = 0;
2749 mutex_unlock(&ctx->uring_lock);
6c271ce2
JA
2750 } else {
2751 /*
2752 * Normal IO, just pretend everything completed.
2753 * We don't have to poll completions for that.
2754 */
2755 nr_events = inflight;
2756 }
2757
2758 inflight -= nr_events;
2759 if (!inflight)
2760 timeout = jiffies + ctx->sq_thread_idle;
2761 }
2762
fb5ccc98
PB
2763 to_submit = io_sqring_entries(ctx);
2764 if (!to_submit) {
6c271ce2
JA
2765 /*
2766 * We're polling. If we're within the defined idle
2767 * period, then let us spin without work before going
2768 * to sleep.
2769 */
2770 if (inflight || !time_after(jiffies, timeout)) {
9831a90c 2771 cond_resched();
6c271ce2
JA
2772 continue;
2773 }
2774
2775 /*
2776 * Drop cur_mm before scheduling, we can't hold it for
2777 * long periods (or over schedule()). Do this before
2778 * adding ourselves to the waitqueue, as the unuse/drop
2779 * may sleep.
2780 */
2781 if (cur_mm) {
2782 unuse_mm(cur_mm);
2783 mmput(cur_mm);
2784 cur_mm = NULL;
2785 }
2786
2787 prepare_to_wait(&ctx->sqo_wait, &wait,
2788 TASK_INTERRUPTIBLE);
2789
2790 /* Tell userspace we may need a wakeup call */
75b28aff 2791 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
0d7bae69
SB
2792 /* make sure to read SQ tail after writing flags */
2793 smp_mb();
6c271ce2 2794
fb5ccc98
PB
2795 to_submit = io_sqring_entries(ctx);
2796 if (!to_submit) {
2bbcd6d3 2797 if (kthread_should_park()) {
6c271ce2
JA
2798 finish_wait(&ctx->sqo_wait, &wait);
2799 break;
2800 }
2801 if (signal_pending(current))
2802 flush_signals(current);
2803 schedule();
2804 finish_wait(&ctx->sqo_wait, &wait);
2805
75b28aff 2806 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6c271ce2
JA
2807 continue;
2808 }
2809 finish_wait(&ctx->sqo_wait, &wait);
2810
75b28aff 2811 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6c271ce2
JA
2812 }
2813
6c271ce2 2814 /* Unless all new commands are FIXED regions, grab mm */
fb5ccc98 2815 if (!cur_mm) {
6c271ce2
JA
2816 mm_fault = !mmget_not_zero(ctx->sqo_mm);
2817 if (!mm_fault) {
2818 use_mm(ctx->sqo_mm);
2819 cur_mm = ctx->sqo_mm;
2820 }
2821 }
2822
fb5ccc98
PB
2823 to_submit = min(to_submit, ctx->sq_entries);
2824 inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL,
2825 mm_fault);
6c271ce2
JA
2826
2827 /* Commit SQ ring head once we've consumed all SQEs */
2828 io_commit_sqring(ctx);
2829 }
2830
2831 set_fs(old_fs);
2832 if (cur_mm) {
2833 unuse_mm(cur_mm);
2834 mmput(cur_mm);
2835 }
06058632 2836
2bbcd6d3 2837 kthread_parkme();
06058632 2838
6c271ce2
JA
2839 return 0;
2840}
2841
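/*
 * Userspace counterpart (illustrative sketch, not part of this file):
 * with IORING_SETUP_SQPOLL the application normally never enters the
 * kernel to submit. Mirroring the flag/tail ordering above, it must
 * issue a full barrier after bumping the SQ tail, then check
 * IORING_SQ_NEED_WAKEUP. Syscall number is the x86-64/asm-generic one.
 */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

#ifndef __NR_io_uring_enter
#define __NR_io_uring_enter 426
#endif

static void sqpoll_after_submit(int ring_fd, volatile unsigned *sq_flags)
{
	/* order our SQ tail store before reading the wakeup flag */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}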
bc808bce 2842static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
2b188cc1 2843{
9a56a232 2844 struct io_submit_state state, *statep = NULL;
9e645e11 2845 struct io_kiocb *link = NULL;
4fe2c963 2846 struct io_kiocb *shadow_req = NULL;
9e645e11 2847 bool prev_was_link = false;
5c8b0b54 2848 int i, submit = 0;
2b188cc1 2849
9a56a232
JA
2850 if (to_submit > IO_PLUG_THRESHOLD) {
2851 io_submit_state_start(&state, ctx, to_submit);
2852 statep = &state;
2853 }
2b188cc1
JA
2854
2855 for (i = 0; i < to_submit; i++) {
2856 struct sqe_submit s;
2857
2858 if (!io_get_sqring(ctx, &s))
2859 break;
2860
9e645e11
JA
2861 /*
2862 * If previous wasn't linked and we have a linked command,
2863 * that's the end of the chain. Submit the previous link.
2864 */
2865 if (!prev_was_link && link) {
bc808bce 2866 io_queue_link_head(ctx, link, &link->submit, shadow_req);
9e645e11 2867 link = NULL;
5f5ad9ce 2868 shadow_req = NULL;
9e645e11
JA
2869 }
2870 prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
2871
4fe2c963
JL
2872 if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
2873 if (!shadow_req) {
2874 shadow_req = io_get_req(ctx, NULL);
a1041c27
JL
2875 if (unlikely(!shadow_req))
2876 goto out;
4fe2c963
JL
2877 shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
2878 refcount_dec(&shadow_req->refs);
2879 }
2880 shadow_req->sequence = s.sequence;
2881 }
2882
a1041c27 2883out:
2b188cc1 2884 s.has_user = true;
def596e9 2885 s.needs_lock = false;
6c271ce2 2886 s.needs_fixed_file = false;
5c8b0b54 2887 submit++;
bc808bce 2888 io_submit_sqe(ctx, &s, statep, &link);
2b188cc1 2889 }
2b188cc1 2890
9e645e11 2891 if (link)
bc808bce 2892 io_queue_link_head(ctx, link, &link->submit, shadow_req);
9a56a232
JA
2893 if (statep)
2894 io_submit_state_end(statep);
2b188cc1 2895
935d1e45
PB
2896 io_commit_sqring(ctx);
2897
5c8b0b54 2898 return submit;
2b188cc1
JA
2899}
2900
bda52162
JA
2901struct io_wait_queue {
2902 struct wait_queue_entry wq;
2903 struct io_ring_ctx *ctx;
2904 unsigned to_wait;
2905 unsigned nr_timeouts;
2906};
2907
2908static inline bool io_should_wake(struct io_wait_queue *iowq)
2909{
2910 struct io_ring_ctx *ctx = iowq->ctx;
2911
2912 /*
2913 * Wake up if we have enough events, or if a timeout occurred since we
2914 * started waiting. For timeouts, we always want to return to userspace,
2915 * regardless of event count.
2916 */
2917 return io_cqring_events(ctx->rings) >= iowq->to_wait ||
2918 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
2919}
2920
2921static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
2922 int wake_flags, void *key)
2923{
2924 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
2925 wq);
2926
2927 if (!io_should_wake(iowq))
2928 return -1;
2929
2930 return autoremove_wake_function(curr, mode, wake_flags, key);
2931}
2932
2b188cc1
JA
2933/*
2934 * Wait until events become available, if we don't already have some. The
2935 * application must reap them itself, as they reside on the shared cq ring.
2936 */
2937static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2938 const sigset_t __user *sig, size_t sigsz)
2939{
bda52162
JA
2940 struct io_wait_queue iowq = {
2941 .wq = {
2942 .private = current,
2943 .func = io_wake_function,
2944 .entry = LIST_HEAD_INIT(iowq.wq.entry),
2945 },
2946 .ctx = ctx,
2947 .to_wait = min_events,
2948 };
75b28aff 2949 struct io_rings *rings = ctx->rings;
2b188cc1
JA
2950 int ret;
2951
75b28aff 2952 if (io_cqring_events(rings) >= min_events)
2b188cc1
JA
2953 return 0;
2954
2955 if (sig) {
9e75ad5d
AB
2956#ifdef CONFIG_COMPAT
2957 if (in_compat_syscall())
2958 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 2959 sigsz);
9e75ad5d
AB
2960 else
2961#endif
b772434b 2962 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 2963
2b188cc1
JA
2964 if (ret)
2965 return ret;
2966 }
2967
bda52162
JA
2968 ret = 0;
2969 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
2970 do {
2971 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
2972 TASK_INTERRUPTIBLE);
2973 if (io_should_wake(&iowq))
2974 break;
2975 schedule();
2976 if (signal_pending(current)) {
2977 ret = -ERESTARTSYS;
2978 break;
2979 }
2980 } while (1);
2981 finish_wait(&ctx->wait, &iowq.wq);
2982
b772434b 2983 restore_saved_sigmask_unless(ret == -ERESTARTSYS);
97abc889
ON
2984 if (ret == -ERESTARTSYS)
2985 ret = -EINTR;
2b188cc1 2986
75b28aff 2987 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
2988}
2989
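/*
 * Userspace counterpart (illustrative sketch, not part of this file):
 * reaping CQEs after io_cqring_wait() returns. The acquire load of
 * cq.tail pairs with the kernel barrier in io_commit_cqring, and the
 * release store of cq.head lets the kernel reuse the slot. Pointers
 * are into the CQ ring mmap, located via io_cqring_offsets.
 */
#include <stdatomic.h>
#include <linux/io_uring.h>

struct cq_view {
	_Atomic unsigned *head;		/* cq mmap + cq_off.head */
	_Atomic unsigned *tail;		/* cq mmap + cq_off.tail */
	unsigned ring_mask;
	struct io_uring_cqe *cqes;	/* cq mmap + cq_off.cqes */
};

/* returns 0 and fills *out if a CQE was available, -1 if the ring is empty */
static int cq_pop(struct cq_view *cq, struct io_uring_cqe *out)
{
	unsigned head = atomic_load_explicit(cq->head, memory_order_relaxed);

	if (head == atomic_load_explicit(cq->tail, memory_order_acquire))
		return -1;
	*out = cq->cqes[head & cq->ring_mask];
	atomic_store_explicit(cq->head, head + 1, memory_order_release);
	return 0;
}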
6b06314c
JA
2990static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
2991{
2992#if defined(CONFIG_UNIX)
2993 if (ctx->ring_sock) {
2994 struct sock *sock = ctx->ring_sock->sk;
2995 struct sk_buff *skb;
2996
2997 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
2998 kfree_skb(skb);
2999 }
3000#else
3001 int i;
3002
3003 for (i = 0; i < ctx->nr_user_files; i++)
08a45173
JA
3004 if (ctx->user_files[i])
3005 fput(ctx->user_files[i]);
6b06314c
JA
3006#endif
3007}
3008
3009static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
3010{
3011 if (!ctx->user_files)
3012 return -ENXIO;
3013
3014 __io_sqe_files_unregister(ctx);
3015 kfree(ctx->user_files);
3016 ctx->user_files = NULL;
3017 ctx->nr_user_files = 0;
3018 return 0;
3019}
3020
6c271ce2
JA
3021static void io_sq_thread_stop(struct io_ring_ctx *ctx)
3022{
3023 if (ctx->sqo_thread) {
a4c0b3de 3024 wait_for_completion(&ctx->sqo_thread_started);
2bbcd6d3
RP
3025 /*
3026 * The park is a bit of a work-around; without it we get
3027 * warning spews on shutdown with SQPOLL set and affinity
3028 * set to a single CPU.
3029 */
06058632 3030 kthread_park(ctx->sqo_thread);
6c271ce2
JA
3031 kthread_stop(ctx->sqo_thread);
3032 ctx->sqo_thread = NULL;
3033 }
3034}
3035
6b06314c
JA
3036static void io_finish_async(struct io_ring_ctx *ctx)
3037{
54a91f3b
JA
3038 int i;
3039
6c271ce2
JA
3040 io_sq_thread_stop(ctx);
3041
54a91f3b
JA
3042 for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) {
3043 if (ctx->sqo_wq[i]) {
3044 destroy_workqueue(ctx->sqo_wq[i]);
3045 ctx->sqo_wq[i] = NULL;
3046 }
6b06314c
JA
3047 }
3048}
3049
3050#if defined(CONFIG_UNIX)
3051static void io_destruct_skb(struct sk_buff *skb)
3052{
3053 struct io_ring_ctx *ctx = skb->sk->sk_user_data;
8a997340
JA
3054 int i;
3055
3056 for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++)
3057 if (ctx->sqo_wq[i])
3058 flush_workqueue(ctx->sqo_wq[i]);
6b06314c 3059
6b06314c
JA
3060 unix_destruct_scm(skb);
3061}
3062
3063/*
3064 * Ensure the UNIX gc is aware of our file set, so we are certain that
3065 * the io_uring can be safely unregistered on process exit, even if we have
3066 * loops in the file referencing.
3067 */
3068static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
3069{
3070 struct sock *sk = ctx->ring_sock->sk;
3071 struct scm_fp_list *fpl;
3072 struct sk_buff *skb;
08a45173 3073 int i, nr_files;
6b06314c
JA
3074
3075 if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
3076 unsigned long inflight = ctx->user->unix_inflight + nr;
3077
3078 if (inflight > task_rlimit(current, RLIMIT_NOFILE))
3079 return -EMFILE;
3080 }
3081
3082 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
3083 if (!fpl)
3084 return -ENOMEM;
3085
3086 skb = alloc_skb(0, GFP_KERNEL);
3087 if (!skb) {
3088 kfree(fpl);
3089 return -ENOMEM;
3090 }
3091
3092 skb->sk = sk;
6b06314c 3093
08a45173 3094 nr_files = 0;
6b06314c
JA
3095 fpl->user = get_uid(ctx->user);
3096 for (i = 0; i < nr; i++) {
08a45173
JA
3097 if (!ctx->user_files[i + offset])
3098 continue;
3099 fpl->fp[nr_files] = get_file(ctx->user_files[i + offset]);
3100 unix_inflight(fpl->user, fpl->fp[nr_files]);
3101 nr_files++;
6b06314c
JA
3102 }
3103
08a45173
JA
3104 if (nr_files) {
3105 fpl->max = SCM_MAX_FD;
3106 fpl->count = nr_files;
3107 UNIXCB(skb).fp = fpl;
3108 skb->destructor = io_destruct_skb;
3109 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
3110 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 3111
08a45173
JA
3112 for (i = 0; i < nr_files; i++)
3113 fput(fpl->fp[i]);
3114 } else {
3115 kfree_skb(skb);
3116 kfree(fpl);
3117 }
6b06314c
JA
3118
3119 return 0;
3120}
3121
3122/*
3123 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
3124 * causes regular reference counting to break down. We rely on the UNIX
3125 * garbage collection to take care of this problem for us.
3126 */
3127static int io_sqe_files_scm(struct io_ring_ctx *ctx)
3128{
3129 unsigned left, total;
3130 int ret = 0;
3131
3132 total = 0;
3133 left = ctx->nr_user_files;
3134 while (left) {
3135 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
3136
3137 ret = __io_sqe_files_scm(ctx, this_files, total);
3138 if (ret)
3139 break;
3140 left -= this_files;
3141 total += this_files;
3142 }
3143
3144 if (!ret)
3145 return 0;
3146
3147 while (total < ctx->nr_user_files) {
08a45173
JA
3148 if (ctx->user_files[total])
3149 fput(ctx->user_files[total]);
6b06314c
JA
3150 total++;
3151 }
3152
3153 return ret;
3154}
3155#else
3156static int io_sqe_files_scm(struct io_ring_ctx *ctx)
3157{
3158 return 0;
3159}
3160#endif
3161
3162static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
3163 unsigned nr_args)
3164{
3165 __s32 __user *fds = (__s32 __user *) arg;
3166 int fd, ret = 0;
3167 unsigned i;
3168
3169 if (ctx->user_files)
3170 return -EBUSY;
3171 if (!nr_args)
3172 return -EINVAL;
3173 if (nr_args > IORING_MAX_FIXED_FILES)
3174 return -EMFILE;
3175
3176 ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL);
3177 if (!ctx->user_files)
3178 return -ENOMEM;
3179
08a45173 3180 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
6b06314c
JA
3181 ret = -EFAULT;
3182 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
3183 break;
08a45173
JA
3184 /* allow sparse sets */
3185 if (fd == -1) {
3186 ret = 0;
3187 continue;
3188 }
6b06314c
JA
3189
3190 ctx->user_files[i] = fget(fd);
3191
3192 ret = -EBADF;
3193 if (!ctx->user_files[i])
3194 break;
3195 /*
3196 * Don't allow io_uring instances to be registered. If UNIX
3197 * isn't enabled, then this causes a reference cycle and this
3198 * instance can never get freed. If UNIX is enabled we'll
3199 * handle it just fine, but there's still no point in allowing
3200 * a ring fd as it doesn't support regular read/write anyway.
3201 */
3202 if (ctx->user_files[i]->f_op == &io_uring_fops) {
3203 fput(ctx->user_files[i]);
3204 break;
3205 }
6b06314c
JA
3206 ret = 0;
3207 }
3208
3209 if (ret) {
3210 for (i = 0; i < ctx->nr_user_files; i++)
08a45173
JA
3211 if (ctx->user_files[i])
3212 fput(ctx->user_files[i]);
6b06314c
JA
3213
3214 kfree(ctx->user_files);
25adf50f 3215 ctx->user_files = NULL;
6b06314c
JA
3216 ctx->nr_user_files = 0;
3217 return ret;
3218 }
3219
3220 ret = io_sqe_files_scm(ctx);
3221 if (ret)
3222 io_sqe_files_unregister(ctx);
3223
3224 return ret;
3225}
3226
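/*
 * Userspace view (illustrative sketch, not part of this file): the
 * change above allows sparse fixed file sets. Slots registered as -1
 * are reserved but empty; an SQE using such a slot with
 * IOSQE_FIXED_FILE fails with -EBADF. Syscall number is the
 * x86-64/asm-generic one.
 */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

#ifndef __NR_io_uring_register
#define __NR_io_uring_register 427
#endif

static int register_sparse_files(int ring_fd, int fd0)
{
	__s32 fds[4] = { fd0, -1, -1, -1 };	/* slot 0 live, 1-3 sparse */

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES, fds, 4);
}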
6c271ce2
JA
3227static int io_sq_offload_start(struct io_ring_ctx *ctx,
3228 struct io_uring_params *p)
2b188cc1
JA
3229{
3230 int ret;
3231
6c271ce2 3232 init_waitqueue_head(&ctx->sqo_wait);
2b188cc1
JA
3233 mmgrab(current->mm);
3234 ctx->sqo_mm = current->mm;
3235
6c271ce2 3236 if (ctx->flags & IORING_SETUP_SQPOLL) {
3ec482d1
JA
3237 ret = -EPERM;
3238 if (!capable(CAP_SYS_ADMIN))
3239 goto err;
3240
917257da
JA
3241 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
3242 if (!ctx->sq_thread_idle)
3243 ctx->sq_thread_idle = HZ;
3244
6c271ce2 3245 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 3246 int cpu = p->sq_thread_cpu;
6c271ce2 3247
917257da 3248 ret = -EINVAL;
44a9bd18
JA
3249 if (cpu >= nr_cpu_ids)
3250 goto err;
7889f44d 3251 if (!cpu_online(cpu))
917257da
JA
3252 goto err;
3253
6c271ce2
JA
3254 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
3255 ctx, cpu,
3256 "io_uring-sq");
3257 } else {
3258 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
3259 "io_uring-sq");
3260 }
3261 if (IS_ERR(ctx->sqo_thread)) {
3262 ret = PTR_ERR(ctx->sqo_thread);
3263 ctx->sqo_thread = NULL;
3264 goto err;
3265 }
3266 wake_up_process(ctx->sqo_thread);
3267 } else if (p->flags & IORING_SETUP_SQ_AFF) {
3268 /* Can't have SQ_AFF without SQPOLL */
3269 ret = -EINVAL;
3270 goto err;
3271 }
3272
2b188cc1 3273 /* Do QD, or 2 * CPUS, whatever is smallest */
54a91f3b
JA
3274 ctx->sqo_wq[0] = alloc_workqueue("io_ring-wq",
3275 WQ_UNBOUND | WQ_FREEZABLE,
2b188cc1 3276 min(ctx->sq_entries - 1, 2 * num_online_cpus()));
54a91f3b
JA
3277 if (!ctx->sqo_wq[0]) {
3278 ret = -ENOMEM;
3279 goto err;
3280 }
3281
3282 /*
3283 * This is for buffered writes, where we want to limit the parallelism
3284 * due to file locking in file systems. As "normal" buffered writes
3285 * should parallelize on writeout quite nicely, limit us to having 2
3286 * pending. This avoids massive contention on the inode when doing
3287 * buffered async writes.
3288 */
3289 ctx->sqo_wq[1] = alloc_workqueue("io_ring-write-wq",
3290 WQ_UNBOUND | WQ_FREEZABLE, 2);
3291 if (!ctx->sqo_wq[1]) {
2b188cc1
JA
3292 ret = -ENOMEM;
3293 goto err;
3294 }
3295
3296 return 0;
3297err:
54a91f3b 3298 io_finish_async(ctx);
2b188cc1
JA
3299 mmdrop(ctx->sqo_mm);
3300 ctx->sqo_mm = NULL;
3301 return ret;
3302}
3303
3304static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
3305{
3306 atomic_long_sub(nr_pages, &user->locked_vm);
3307}
3308
3309static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
3310{
3311 unsigned long page_limit, cur_pages, new_pages;
3312
3313 /* Don't allow more pages than we can safely lock */
3314 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
3315
3316 do {
3317 cur_pages = atomic_long_read(&user->locked_vm);
3318 new_pages = cur_pages + nr_pages;
3319 if (new_pages > page_limit)
3320 return -ENOMEM;
3321 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
3322 new_pages) != cur_pages);
3323
3324 return 0;
3325}
3326
3327static void io_mem_free(void *ptr)
3328{
52e04ef4
MR
3329 struct page *page;
3330
3331 if (!ptr)
3332 return;
2b188cc1 3333
52e04ef4 3334 page = virt_to_head_page(ptr);
2b188cc1
JA
3335 if (put_page_testzero(page))
3336 free_compound_page(page);
3337}
3338
3339static void *io_mem_alloc(size_t size)
3340{
3341 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
3342 __GFP_NORETRY;
3343
3344 return (void *) __get_free_pages(gfp_flags, get_order(size));
3345}
3346
75b28aff
HV
3347static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
3348 size_t *sq_offset)
3349{
3350 struct io_rings *rings;
3351 size_t off, sq_array_size;
3352
3353 off = struct_size(rings, cqes, cq_entries);
3354 if (off == SIZE_MAX)
3355 return SIZE_MAX;
3356
3357#ifdef CONFIG_SMP
3358 off = ALIGN(off, SMP_CACHE_BYTES);
3359 if (off == 0)
3360 return SIZE_MAX;
3361#endif
3362
3363 sq_array_size = array_size(sizeof(u32), sq_entries);
3364 if (sq_array_size == SIZE_MAX)
3365 return SIZE_MAX;
3366
3367 if (check_add_overflow(off, sq_array_size, &off))
3368 return SIZE_MAX;
3369
3370 if (sq_offset)
3371 *sq_offset = off;
3372
3373 return off;
3374}
3375
2b188cc1
JA
3376static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
3377{
75b28aff 3378 size_t pages;
2b188cc1 3379
75b28aff
HV
3380 pages = (size_t)1 << get_order(
3381 rings_size(sq_entries, cq_entries, NULL));
3382 pages += (size_t)1 << get_order(
3383 array_size(sizeof(struct io_uring_sqe), sq_entries));
2b188cc1 3384
75b28aff 3385 return pages;
2b188cc1
JA
3386}
3387
edafccee
JA
3388static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
3389{
3390 int i, j;
3391
3392 if (!ctx->user_bufs)
3393 return -ENXIO;
3394
3395 for (i = 0; i < ctx->nr_user_bufs; i++) {
3396 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3397
3398 for (j = 0; j < imu->nr_bvecs; j++)
27c4d3a3 3399 put_user_page(imu->bvec[j].bv_page);
edafccee
JA
3400
3401 if (ctx->account_mem)
3402 io_unaccount_mem(ctx->user, imu->nr_bvecs);
d4ef6475 3403 kvfree(imu->bvec);
edafccee
JA
3404 imu->nr_bvecs = 0;
3405 }
3406
3407 kfree(ctx->user_bufs);
3408 ctx->user_bufs = NULL;
3409 ctx->nr_user_bufs = 0;
3410 return 0;
3411}
3412
3413static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
3414 void __user *arg, unsigned index)
3415{
3416 struct iovec __user *src;
3417
3418#ifdef CONFIG_COMPAT
3419 if (ctx->compat) {
3420 struct compat_iovec __user *ciovs;
3421 struct compat_iovec ciov;
3422
3423 ciovs = (struct compat_iovec __user *) arg;
3424 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
3425 return -EFAULT;
3426
3427 dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
3428 dst->iov_len = ciov.iov_len;
3429 return 0;
3430 }
3431#endif
3432 src = (struct iovec __user *) arg;
3433 if (copy_from_user(dst, &src[index], sizeof(*dst)))
3434 return -EFAULT;
3435 return 0;
3436}
3437
3438static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
3439 unsigned nr_args)
3440{
3441 struct vm_area_struct **vmas = NULL;
3442 struct page **pages = NULL;
3443 int i, j, got_pages = 0;
3444 int ret = -EINVAL;
3445
3446 if (ctx->user_bufs)
3447 return -EBUSY;
3448 if (!nr_args || nr_args > UIO_MAXIOV)
3449 return -EINVAL;
3450
3451 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
3452 GFP_KERNEL);
3453 if (!ctx->user_bufs)
3454 return -ENOMEM;
3455
3456 for (i = 0; i < nr_args; i++) {
3457 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3458 unsigned long off, start, end, ubuf;
3459 int pret, nr_pages;
3460 struct iovec iov;
3461 size_t size;
3462
3463 ret = io_copy_iov(ctx, &iov, arg, i);
3464 if (ret)
a278682d 3465 goto err;
edafccee
JA
3466
3467 /*
3468 * Don't impose further limits on the size and buffer
3469 * constraints here; we'll return -EINVAL later when IO is
3470 * submitted if they are wrong.
3471 */
3472 ret = -EFAULT;
3473 if (!iov.iov_base || !iov.iov_len)
3474 goto err;
3475
3476 /* arbitrary limit, but we need something */
3477 if (iov.iov_len > SZ_1G)
3478 goto err;
3479
3480 ubuf = (unsigned long) iov.iov_base;
3481 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3482 start = ubuf >> PAGE_SHIFT;
3483 nr_pages = end - start;
3484
3485 if (ctx->account_mem) {
3486 ret = io_account_mem(ctx->user, nr_pages);
3487 if (ret)
3488 goto err;
3489 }
3490
3491 ret = 0;
3492 if (!pages || nr_pages > got_pages) {
3493 kfree(vmas);
3494 kfree(pages);
d4ef6475 3495 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
edafccee 3496 GFP_KERNEL);
d4ef6475 3497 vmas = kvmalloc_array(nr_pages,
edafccee
JA
3498 sizeof(struct vm_area_struct *),
3499 GFP_KERNEL);
3500 if (!pages || !vmas) {
3501 ret = -ENOMEM;
3502 if (ctx->account_mem)
3503 io_unaccount_mem(ctx->user, nr_pages);
3504 goto err;
3505 }
3506 got_pages = nr_pages;
3507 }
3508
d4ef6475 3509 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
edafccee
JA
3510 GFP_KERNEL);
3511 ret = -ENOMEM;
3512 if (!imu->bvec) {
3513 if (ctx->account_mem)
3514 io_unaccount_mem(ctx->user, nr_pages);
3515 goto err;
3516 }
3517
3518 ret = 0;
3519 down_read(&current->mm->mmap_sem);
932f4a63
IW
3520 pret = get_user_pages(ubuf, nr_pages,
3521 FOLL_WRITE | FOLL_LONGTERM,
3522 pages, vmas);
edafccee
JA
3523 if (pret == nr_pages) {
3524 /* don't support file backed memory */
3525 for (j = 0; j < nr_pages; j++) {
3526 struct vm_area_struct *vma = vmas[j];
3527
3528 if (vma->vm_file &&
3529 !is_file_hugepages(vma->vm_file)) {
3530 ret = -EOPNOTSUPP;
3531 break;
3532 }
3533 }
3534 } else {
3535 ret = pret < 0 ? pret : -EFAULT;
3536 }
3537 up_read(&current->mm->mmap_sem);
3538 if (ret) {
3539 /*
3540 * if we did a partial map, or found file-backed vmas,
3541 * release any pages we did get
3542 */
27c4d3a3
JH
3543 if (pret > 0)
3544 put_user_pages(pages, pret);
edafccee
JA
3545 if (ctx->account_mem)
3546 io_unaccount_mem(ctx->user, nr_pages);
d4ef6475 3547 kvfree(imu->bvec);
edafccee
JA
3548 goto err;
3549 }
3550
3551 off = ubuf & ~PAGE_MASK;
3552 size = iov.iov_len;
3553 for (j = 0; j < nr_pages; j++) {
3554 size_t vec_len;
3555
3556 vec_len = min_t(size_t, size, PAGE_SIZE - off);
3557 imu->bvec[j].bv_page = pages[j];
3558 imu->bvec[j].bv_len = vec_len;
3559 imu->bvec[j].bv_offset = off;
3560 off = 0;
3561 size -= vec_len;
3562 }
3563 /* store original address for later verification */
3564 imu->ubuf = ubuf;
3565 imu->len = iov.iov_len;
3566 imu->nr_bvecs = nr_pages;
3567
3568 ctx->nr_user_bufs++;
3569 }
d4ef6475
MR
3570 kvfree(pages);
3571 kvfree(vmas);
edafccee
JA
3572 return 0;
3573err:
d4ef6475
MR
3574 kvfree(pages);
3575 kvfree(vmas);
edafccee
JA
3576 io_sqe_buffer_unregister(ctx);
3577 return ret;
3578}
3579
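/*
 * Userspace view (illustrative sketch, not part of this file):
 * registering a fixed buffer and issuing a READ_FIXED against it.
 * The registration pins the pages once, so per-IO page pinning is
 * avoided; buf_index selects the registered iovec. Syscall number is
 * the x86-64/asm-generic one.
 */
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/io_uring.h>

#ifndef __NR_io_uring_register
#define __NR_io_uring_register 427
#endif

static struct iovec fixed_iov;

static int setup_fixed_read(int ring_fd, struct io_uring_sqe *sqe, int fd)
{
	fixed_iov.iov_len = 4096;
	if (posix_memalign(&fixed_iov.iov_base, 4096, fixed_iov.iov_len))
		return -1;
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_BUFFERS, &fixed_iov, 1))
		return -1;

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_READ_FIXED;
	sqe->fd = fd;
	sqe->addr = (unsigned long)fixed_iov.iov_base;	/* inside buffer 0 */
	sqe->len = fixed_iov.iov_len;
	sqe->buf_index = 0;				/* registered slot */
	return 0;
}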
9b402849
JA
3580static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
3581{
3582 __s32 __user *fds = arg;
3583 int fd;
3584
3585 if (ctx->cq_ev_fd)
3586 return -EBUSY;
3587
3588 if (copy_from_user(&fd, fds, sizeof(*fds)))
3589 return -EFAULT;
3590
3591 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
3592 if (IS_ERR(ctx->cq_ev_fd)) {
3593 int ret = PTR_ERR(ctx->cq_ev_fd);
3594 ctx->cq_ev_fd = NULL;
3595 return ret;
3596 }
3597
3598 return 0;
3599}
3600
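/*
 * Userspace view (illustrative sketch, not part of this file): wiring
 * a completion eventfd via the registration above. Every posted CQE
 * then also signals the eventfd, so the ring can be folded into an
 * existing epoll/select loop. Syscall number is the x86-64/asm-generic
 * one.
 */
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

#ifndef __NR_io_uring_register
#define __NR_io_uring_register 427
#endif

static int attach_cq_eventfd(int ring_fd)
{
	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

	if (efd < 0)
		return -1;
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_EVENTFD, &efd, 1)) {
		close(efd);
		return -1;
	}
	return efd;	/* poll this fd for CQ activity */
}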
3601static int io_eventfd_unregister(struct io_ring_ctx *ctx)
3602{
3603 if (ctx->cq_ev_fd) {
3604 eventfd_ctx_put(ctx->cq_ev_fd);
3605 ctx->cq_ev_fd = NULL;
3606 return 0;
3607 }
3608
3609 return -ENXIO;
3610}
3611
2b188cc1
JA
3612static void io_ring_ctx_free(struct io_ring_ctx *ctx)
3613{
6b06314c 3614 io_finish_async(ctx);
2b188cc1
JA
3615 if (ctx->sqo_mm)
3616 mmdrop(ctx->sqo_mm);
def596e9
JA
3617
3618 io_iopoll_reap_events(ctx);
edafccee 3619 io_sqe_buffer_unregister(ctx);
6b06314c 3620 io_sqe_files_unregister(ctx);
9b402849 3621 io_eventfd_unregister(ctx);
def596e9 3622
2b188cc1 3623#if defined(CONFIG_UNIX)
355e8d26
EB
3624 if (ctx->ring_sock) {
3625 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 3626 sock_release(ctx->ring_sock);
355e8d26 3627 }
2b188cc1
JA
3628#endif
3629
75b28aff 3630 io_mem_free(ctx->rings);
2b188cc1 3631 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
3632
3633 percpu_ref_exit(&ctx->refs);
3634 if (ctx->account_mem)
3635 io_unaccount_mem(ctx->user,
3636 ring_pages(ctx->sq_entries, ctx->cq_entries));
3637 free_uid(ctx->user);
3638 kfree(ctx);
3639}
3640
3641static __poll_t io_uring_poll(struct file *file, poll_table *wait)
3642{
3643 struct io_ring_ctx *ctx = file->private_data;
3644 __poll_t mask = 0;
3645
3646 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
3647 /*
3648 * synchronizes with barrier from wq_has_sleeper call in
3649 * io_commit_cqring
3650 */
2b188cc1 3651 smp_rmb();
75b28aff
HV
3652 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
3653 ctx->rings->sq_ring_entries)
2b188cc1 3654 mask |= EPOLLOUT | EPOLLWRNORM;
daa5de54 3655 if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
2b188cc1
JA
3656 mask |= EPOLLIN | EPOLLRDNORM;
3657
3658 return mask;
3659}
3660
3661static int io_uring_fasync(int fd, struct file *file, int on)
3662{
3663 struct io_ring_ctx *ctx = file->private_data;
3664
3665 return fasync_helper(fd, file, on, &ctx->cq_fasync);
3666}
3667
3668static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
3669{
3670 mutex_lock(&ctx->uring_lock);
3671 percpu_ref_kill(&ctx->refs);
3672 mutex_unlock(&ctx->uring_lock);
3673
5262f567 3674 io_kill_timeouts(ctx);
221c5eb2 3675 io_poll_remove_all(ctx);
def596e9 3676 io_iopoll_reap_events(ctx);
2b188cc1
JA
3677 wait_for_completion(&ctx->ctx_done);
3678 io_ring_ctx_free(ctx);
3679}
3680
3681static int io_uring_release(struct inode *inode, struct file *file)
3682{
3683 struct io_ring_ctx *ctx = file->private_data;
3684
3685 file->private_data = NULL;
3686 io_ring_ctx_wait_and_kill(ctx);
3687 return 0;
3688}
3689
3690static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3691{
3692 loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
3693 unsigned long sz = vma->vm_end - vma->vm_start;
3694 struct io_ring_ctx *ctx = file->private_data;
3695 unsigned long pfn;
3696 struct page *page;
3697 void *ptr;
3698
3699 switch (offset) {
3700 case IORING_OFF_SQ_RING:
75b28aff
HV
3701 case IORING_OFF_CQ_RING:
3702 ptr = ctx->rings;
2b188cc1
JA
3703 break;
3704 case IORING_OFF_SQES:
3705 ptr = ctx->sq_sqes;
3706 break;
2b188cc1
JA
3707 default:
3708 return -EINVAL;
3709 }
3710
3711 page = virt_to_head_page(ptr);
a50b854e 3712 if (sz > page_size(page))
2b188cc1
JA
3713 return -EINVAL;
3714
3715 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
3716 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
3717}
3718
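/*
 * Userspace counterpart (illustrative sketch, not part of this file):
 * mapping the three regions served by io_uring_mmap() above. The
 * magic offsets select the region; region sizes are derived from the
 * io_uring_params the kernel filled in at setup time.
 */
#include <sys/mman.h>
#include <linux/io_uring.h>

struct ring_maps {
	void *sq_ring;
	void *cq_ring;
	struct io_uring_sqe *sqes;
};

static int map_rings(int ring_fd, const struct io_uring_params *p,
		     struct ring_maps *m)
{
	size_t sq_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
	size_t cq_sz = p->cq_off.cqes +
		       p->cq_entries * sizeof(struct io_uring_cqe);

	m->sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_POPULATE, ring_fd,
			  IORING_OFF_SQ_RING);
	m->cq_ring = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_POPULATE, ring_fd,
			  IORING_OFF_CQ_RING);
	m->sqes = mmap(NULL, p->sq_entries * sizeof(struct io_uring_sqe),
		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		       ring_fd, IORING_OFF_SQES);

	if (m->sq_ring == MAP_FAILED || m->cq_ring == MAP_FAILED ||
	    m->sqes == MAP_FAILED)
		return -1;
	return 0;
}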
3719SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3720 u32, min_complete, u32, flags, const sigset_t __user *, sig,
3721 size_t, sigsz)
3722{
3723 struct io_ring_ctx *ctx;
3724 long ret = -EBADF;
3725 int submitted = 0;
3726 struct fd f;
3727
6c271ce2 3728 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
2b188cc1
JA
3729 return -EINVAL;
3730
3731 f = fdget(fd);
3732 if (!f.file)
3733 return -EBADF;
3734
3735 ret = -EOPNOTSUPP;
3736 if (f.file->f_op != &io_uring_fops)
3737 goto out_fput;
3738
3739 ret = -ENXIO;
3740 ctx = f.file->private_data;
3741 if (!percpu_ref_tryget(&ctx->refs))
3742 goto out_fput;
3743
6c271ce2
JA
3744 /*
3745 * For SQ polling, the thread will do all submissions and completions.
3746 * Just return the requested submit count, and wake the thread if
3747 * we were asked to.
3748 */
b2a9eada 3749 ret = 0;
6c271ce2
JA
3750 if (ctx->flags & IORING_SETUP_SQPOLL) {
3751 if (flags & IORING_ENTER_SQ_WAKEUP)
3752 wake_up(&ctx->sqo_wait);
3753 submitted = to_submit;
b2a9eada 3754 } else if (to_submit) {
2b188cc1
JA
3755 to_submit = min(to_submit, ctx->sq_entries);
3756
3757 mutex_lock(&ctx->uring_lock);
bc808bce 3758 submitted = io_ring_submit(ctx, to_submit);
2b188cc1 3759 mutex_unlock(&ctx->uring_lock);
2b188cc1
JA
3760 }
3761 if (flags & IORING_ENTER_GETEVENTS) {
def596e9
JA
3762 unsigned nr_events = 0;
3763
2b188cc1
JA
3764 min_complete = min(min_complete, ctx->cq_entries);
3765
def596e9 3766 if (ctx->flags & IORING_SETUP_IOPOLL) {
def596e9 3767 ret = io_iopoll_check(ctx, &nr_events, min_complete);
def596e9
JA
3768 } else {
3769 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
3770 }
2b188cc1
JA
3771 }
3772
6805b32e 3773 percpu_ref_put(&ctx->refs);
2b188cc1
JA
3774out_fput:
3775 fdput(f);
3776 return submitted ? submitted : ret;
3777}
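
/*
 * Example (illustrative only, not part of this file): submit-and-wait in
 * a single trip into the kernel. A minimal sketch using the raw syscall,
 * assuming <sys/syscall.h> provides __NR_io_uring_enter and one sqe has
 * already been written to the SQ ring with the tail advanced:
 *
 *	int ret = (int) syscall(__NR_io_uring_enter, ring_fd, 1, 1,
 *				IORING_ENTER_GETEVENTS, NULL, 0);
 *	// ret is the number of sqes consumed; the cqe is now visible
 *
 * With IORING_SETUP_SQPOLL the kernel thread performs the submissions,
 * as the code above shows: the call just reports to_submit and only
 * wakes the thread when IORING_ENTER_SQ_WAKEUP is set.
 */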

static const struct file_operations io_uring_fops = {
	.release = io_uring_release,
	.mmap = io_uring_mmap,
	.poll = io_uring_poll,
	.fasync = io_uring_fasync,
};

static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
				  struct io_uring_params *p)
{
	struct io_rings *rings;
	size_t size, sq_array_offset;

	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	rings = io_mem_alloc(size);
	if (!rings)
		return -ENOMEM;

	ctx->rings = rings;
	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
	rings->sq_ring_mask = p->sq_entries - 1;
	rings->cq_ring_mask = p->cq_entries - 1;
	rings->sq_ring_entries = p->sq_entries;
	rings->cq_ring_entries = p->cq_entries;
	ctx->sq_mask = rings->sq_ring_mask;
	ctx->cq_mask = rings->cq_ring_mask;
	ctx->sq_entries = rings->sq_ring_entries;
	ctx->cq_entries = rings->cq_ring_entries;

	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	ctx->sq_sqes = io_mem_alloc(size);
	if (!ctx->sq_sqes)
		return -ENOMEM;

	return 0;
}
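
/*
 * Since sq_entries and cq_entries are rounded up to powers of two, the
 * ring masks stored above let both sides turn the ever-increasing
 * head/tail counters into array indices without a modulo. An
 * illustrative userspace sketch, assuming the mmap'ed pointers from the
 * earlier example:
 *
 *	unsigned index = sqtail & *sq_ring_mask;
 *	struct io_uring_sqe *sqe = &sqes[index];
 *	// ... fill in the sqe, then publish it:
 *	sq_array[index] = index;
 *	// finally store-release the new tail value (sqtail + 1) to *sq_tail
 */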

/*
 * Allocate an anonymous fd: this is what constitutes the application-
 * visible backing of an io_uring instance. The application mmaps this
 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are
 * enabled, we have to tie this fd to a socket for file garbage
 * collection purposes.
 */
static int io_uring_get_fd(struct io_ring_ctx *ctx)
{
	struct file *file;
	int ret;

#if defined(CONFIG_UNIX)
	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
			       &ctx->ring_sock);
	if (ret)
		return ret;
#endif

	ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (ret < 0)
		goto err;

	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
				  O_RDWR | O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
		goto err;
	}

#if defined(CONFIG_UNIX)
	ctx->ring_sock->file = file;
	ctx->ring_sock->sk->sk_user_data = ctx;
#endif
	fd_install(ret, file);
	return ret;
err:
#if defined(CONFIG_UNIX)
	sock_release(ctx->ring_sock);
	ctx->ring_sock = NULL;
#endif
	return ret;
}

static int io_uring_create(unsigned entries, struct io_uring_params *p)
{
	struct user_struct *user = NULL;
	struct io_ring_ctx *ctx;
	bool account_mem;
	int ret;

	if (!entries || entries > IORING_MAX_ENTRIES)
		return -EINVAL;

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	p->cq_entries = 2 * p->sq_entries;

	user = get_uid(current_user());
	account_mem = !capable(CAP_IPC_LOCK);

	if (account_mem) {
		ret = io_account_mem(user,
				ring_pages(p->sq_entries, p->cq_entries));
		if (ret) {
			free_uid(user);
			return ret;
		}
	}

	ctx = io_ring_ctx_alloc(p);
	if (!ctx) {
		if (account_mem)
			io_unaccount_mem(user, ring_pages(p->sq_entries,
								p->cq_entries));
		free_uid(user);
		return -ENOMEM;
	}
	ctx->compat = in_compat_syscall();
	ctx->account_mem = account_mem;
	ctx->user = user;

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_start(ctx, p);
	if (ret)
		goto err;

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	ret = io_uring_get_fd(ctx);
	if (ret < 0)
		goto err;

	p->features = IORING_FEAT_SINGLE_MMAP;
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}
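
/*
 * Example (illustrative only, not part of this file): the sq_off/cq_off
 * offsets published above are all the layout information userspace gets;
 * it adds them to the mmap'ed ring regions ("sq" and "cq" in the earlier
 * sketch) to locate the shared fields:
 *
 *	struct io_uring_params p = { 0 };
 *	int ring_fd = (int) syscall(__NR_io_uring_setup, 256, &p);
 *	// ... mmap the rings as in the earlier sketch, then:
 *
 *	unsigned *sq_tail = (unsigned *)((char *)sq + p.sq_off.tail);
 *	unsigned *sq_ring_mask = (unsigned *)((char *)sq + p.sq_off.ring_mask);
 *	unsigned *sq_array = (unsigned *)((char *)sq + p.sq_off.array);
 *	unsigned *cq_head = (unsigned *)((char *)cq + p.cq_off.head);
 *	struct io_uring_cqe *cqes =
 *		(struct io_uring_cqe *)((char *)cq + p.cq_off.cqes);
 */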

/*
 * Sets up an io_uring context, and returns the fd. The application asks
 * for a ring size; we return the actual sq/cq ring sizes (among other
 * things) in the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	long ret;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF))
		return -EINVAL;

	ret = io_uring_create(entries, &p);
	if (ret < 0)
		return ret;

	if (copy_to_user(params, &p, sizeof(p)))
		return -EFAULT;

	return ret;
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}

static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex; if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	percpu_ref_kill(&ctx->refs);

	/*
	 * Drop uring mutex before waiting for references to exit. If another
	 * thread is currently inside io_uring_enter() it might need to grab
	 * the uring_lock to make progress. If we hold it here across the drain
	 * wait, then we can deadlock. It's safe to drop the mutex here, since
	 * no new references will come in after we've killed the percpu ref.
	 */
	mutex_unlock(&ctx->uring_lock);
	wait_for_completion(&ctx->ctx_done);
	mutex_lock(&ctx->uring_lock);

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffer_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffer_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_EVENTFD:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	/* bring the ctx back to life */
	reinit_completion(&ctx->ctx_done);
	percpu_ref_reinit(&ctx->refs);
	return ret;
}

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
out_fput:
	fdput(f);
	return ret;
}
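
/*
 * Example (illustrative only, not part of this file): registering a
 * fixed buffer up front, so fixed-buffer reads/writes can reuse the
 * pinned pages instead of mapping them per I/O. A minimal sketch via the
 * raw syscall, assuming "ring_fd" came from io_uring_setup(2):
 *
 *	void *buf;
 *	posix_memalign(&buf, 4096, 65536);
 *	struct iovec iov = { .iov_base = buf, .iov_len = 65536 };
 *
 *	int ret = (int) syscall(__NR_io_uring_register, ring_fd,
 *				IORING_REGISTER_BUFFERS, &iov, 1);
 */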

static int __init io_uring_init(void)
{
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
	return 0;
}
__initcall(io_uring_init);