io_uring: fix potential deadlock in io_poll_wake()
[linux-2.6-block.git] / fs / io_uring.c
CommitLineData
2b188cc1
JA
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
1e84b97b
SB
7 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqring (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
2b188cc1
JA
29 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes, but also to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
c992fe29 40 * Copyright (c) 2018-2019 Christoph Hellwig
2b188cc1
JA
41 */
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/errno.h>
45#include <linux/syscalls.h>
46#include <linux/compat.h>
47#include <linux/refcount.h>
48#include <linux/uio.h>
49
50#include <linux/sched/signal.h>
51#include <linux/fs.h>
52#include <linux/file.h>
53#include <linux/fdtable.h>
54#include <linux/mm.h>
55#include <linux/mman.h>
56#include <linux/mmu_context.h>
57#include <linux/percpu.h>
58#include <linux/slab.h>
6c271ce2 59#include <linux/kthread.h>
2b188cc1 60#include <linux/blkdev.h>
edafccee 61#include <linux/bvec.h>
2b188cc1
JA
62#include <linux/net.h>
63#include <net/sock.h>
64#include <net/af_unix.h>
6b06314c 65#include <net/scm.h>
2b188cc1
JA
66#include <linux/anon_inodes.h>
67#include <linux/sched/mm.h>
68#include <linux/uaccess.h>
69#include <linux/nospec.h>
edafccee
JA
70#include <linux/sizes.h>
71#include <linux/hugetlb.h>
2b188cc1 72
c826bd7a
DD
73#define CREATE_TRACE_POINTS
74#include <trace/events/io_uring.h>
75
2b188cc1
JA
76#include <uapi/linux/io_uring.h>
77
78#include "internal.h"
561fb04a 79#include "io-wq.h"
2b188cc1 80
5277deaa 81#define IORING_MAX_ENTRIES 32768
33a107f0 82#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
65e19f54
JA
83
84/*
85 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
86 */
87#define IORING_FILE_TABLE_SHIFT 9
88#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
89#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
90#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
2b188cc1
JA
91
92struct io_uring {
93 u32 head ____cacheline_aligned_in_smp;
94 u32 tail ____cacheline_aligned_in_smp;
95};
96
1e84b97b 97/*
75b28aff
HV
98 * This data is shared with the application through the mmap at offsets
99 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
1e84b97b
SB
100 *
101 * The offsets to the member fields are published through struct
102 * io_sqring_offsets when calling io_uring_setup.
103 */
75b28aff 104struct io_rings {
1e84b97b
SB
105 /*
106 * Head and tail offsets into the ring; the offsets need to be
107 * masked to get valid indices.
108 *
75b28aff
HV
109 * The kernel controls head of the sq ring and the tail of the cq ring,
110 * and the application controls tail of the sq ring and the head of the
111 * cq ring.
1e84b97b 112 */
75b28aff 113 struct io_uring sq, cq;
1e84b97b 114 /*
75b28aff 115 * Bitmasks to apply to head and tail offsets (constant, equals
1e84b97b
SB
116 * ring_entries - 1)
117 */
75b28aff
HV
118 u32 sq_ring_mask, cq_ring_mask;
119 /* Ring sizes (constant, power of 2) */
120 u32 sq_ring_entries, cq_ring_entries;
1e84b97b
SB
121 /*
122 * Number of invalid entries dropped by the kernel due to
123 * invalid index stored in array
124 *
125 * Written by the kernel, shouldn't be modified by the
126 * application (i.e. get number of "new events" by comparing to
127 * cached value).
128 *
129 * After a new SQ head value was read by the application this
130 * counter includes all submissions that were dropped reaching
131 * the new SQ head (and possibly more).
132 */
75b28aff 133 u32 sq_dropped;
1e84b97b
SB
134 /*
135 * Runtime flags
136 *
137 * Written by the kernel, shouldn't be modified by the
138 * application.
139 *
140 * The application needs a full memory barrier before checking
141 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
142 */
75b28aff 143 u32 sq_flags;
1e84b97b
SB
144 /*
145 * Number of completion events lost because the queue was full;
146 * this should be avoided by the application by making sure
147 * there are not more requests pending thatn there is space in
148 * the completion queue.
149 *
150 * Written by the kernel, shouldn't be modified by the
151 * application (i.e. get number of "new events" by comparing to
152 * cached value).
153 *
154 * As completion events come in out of order this counter is not
155 * ordered with any other data.
156 */
75b28aff 157 u32 cq_overflow;
1e84b97b
SB
158 /*
159 * Ring buffer of completion events.
160 *
161 * The kernel writes completion events fresh every time they are
162 * produced, so the application is allowed to modify pending
163 * entries.
164 */
75b28aff 165 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
2b188cc1
JA
166};
167
edafccee
JA
168struct io_mapped_ubuf {
169 u64 ubuf;
170 size_t len;
171 struct bio_vec *bvec;
172 unsigned int nr_bvecs;
173};
174
65e19f54
JA
175struct fixed_file_table {
176 struct file **files;
177};
178
2b188cc1
JA
179struct io_ring_ctx {
180 struct {
181 struct percpu_ref refs;
182 } ____cacheline_aligned_in_smp;
183
184 struct {
185 unsigned int flags;
186 bool compat;
187 bool account_mem;
1d7bb1d5 188 bool cq_overflow_flushed;
2b188cc1 189
75b28aff
HV
190 /*
191 * Ring buffer of indices into array of io_uring_sqe, which is
192 * mmapped by the application using the IORING_OFF_SQES offset.
193 *
194 * This indirection could e.g. be used to assign fixed
195 * io_uring_sqe entries to operations and only submit them to
196 * the queue when needed.
197 *
198 * The kernel modifies neither the indices array nor the entries
199 * array.
200 */
201 u32 *sq_array;
2b188cc1
JA
202 unsigned cached_sq_head;
203 unsigned sq_entries;
204 unsigned sq_mask;
6c271ce2 205 unsigned sq_thread_idle;
498ccd9e 206 unsigned cached_sq_dropped;
206aefde 207 atomic_t cached_cq_overflow;
2b188cc1 208 struct io_uring_sqe *sq_sqes;
de0617e4
JA
209
210 struct list_head defer_list;
5262f567 211 struct list_head timeout_list;
1d7bb1d5 212 struct list_head cq_overflow_list;
fcb323cc
JA
213
214 wait_queue_head_t inflight_wait;
2b188cc1
JA
215 } ____cacheline_aligned_in_smp;
216
206aefde
JA
217 struct io_rings *rings;
218
2b188cc1 219 /* IO offload */
561fb04a 220 struct io_wq *io_wq;
6c271ce2 221 struct task_struct *sqo_thread; /* if using sq thread polling */
2b188cc1 222 struct mm_struct *sqo_mm;
6c271ce2 223 wait_queue_head_t sqo_wait;
75b28aff 224
6b06314c
JA
225 /*
226 * If used, fixed file set. Writers must ensure that ->refs is dead,
227 * readers must ensure that ->refs is alive as long as the file* is
228 * used. Only updated through io_uring_register(2).
229 */
65e19f54 230 struct fixed_file_table *file_table;
6b06314c
JA
231 unsigned nr_user_files;
232
edafccee
JA
233 /* if used, fixed mapped user buffers */
234 unsigned nr_user_bufs;
235 struct io_mapped_ubuf *user_bufs;
236
2b188cc1
JA
237 struct user_struct *user;
238
206aefde
JA
239 /* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */
240 struct completion *completions;
241
0ddf92e8
JA
242 /* if all else fails... */
243 struct io_kiocb *fallback_req;
244
206aefde
JA
245#if defined(CONFIG_UNIX)
246 struct socket *ring_sock;
247#endif
248
249 struct {
250 unsigned cached_cq_tail;
251 unsigned cq_entries;
252 unsigned cq_mask;
253 atomic_t cq_timeouts;
254 struct wait_queue_head cq_wait;
255 struct fasync_struct *cq_fasync;
256 struct eventfd_ctx *cq_ev_fd;
257 } ____cacheline_aligned_in_smp;
2b188cc1
JA
258
259 struct {
260 struct mutex uring_lock;
261 wait_queue_head_t wait;
262 } ____cacheline_aligned_in_smp;
263
264 struct {
265 spinlock_t completion_lock;
def596e9
JA
266 bool poll_multi_file;
267 /*
268 * ->poll_list is protected by the ctx->uring_lock for
269 * io_uring instances that don't use IORING_SETUP_SQPOLL.
270 * For SQPOLL, only the single threaded io_sq_thread() will
271 * manipulate the list, hence no extra locking is needed there.
272 */
273 struct list_head poll_list;
221c5eb2 274 struct list_head cancel_list;
fcb323cc
JA
275
276 spinlock_t inflight_lock;
277 struct list_head inflight_list;
2b188cc1 278 } ____cacheline_aligned_in_smp;
2b188cc1
JA
279};
280
281struct sqe_submit {
282 const struct io_uring_sqe *sqe;
fcb323cc
JA
283 struct file *ring_file;
284 int ring_fd;
8776f3fa 285 u32 sequence;
2b188cc1 286 bool has_user;
ba5290cc 287 bool in_async;
6c271ce2 288 bool needs_fixed_file;
2b188cc1
JA
289};
290
09bb8394
JA
291/*
292 * First field must be the file pointer in all the
293 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
294 */
221c5eb2
JA
295struct io_poll_iocb {
296 struct file *file;
297 struct wait_queue_head *head;
298 __poll_t events;
8c838788 299 bool done;
221c5eb2
JA
300 bool canceled;
301 struct wait_queue_entry wait;
302};
303
5262f567
JA
304struct io_timeout {
305 struct file *file;
306 struct hrtimer timer;
307};
308
09bb8394
JA
309/*
310 * NOTE! Each of the iocb union members has the file pointer
311 * as the first entry in their struct definition. So you can
312 * access the file pointer through any of the sub-structs,
313 * or directly as just 'ki_filp' in this struct.
314 */
2b188cc1 315struct io_kiocb {
221c5eb2 316 union {
09bb8394 317 struct file *file;
221c5eb2
JA
318 struct kiocb rw;
319 struct io_poll_iocb poll;
5262f567 320 struct io_timeout timeout;
221c5eb2 321 };
2b188cc1
JA
322
323 struct sqe_submit submit;
324
325 struct io_ring_ctx *ctx;
326 struct list_head list;
9e645e11 327 struct list_head link_list;
2b188cc1 328 unsigned int flags;
c16361c1 329 refcount_t refs;
8449eeda 330#define REQ_F_NOWAIT 1 /* must not punt to workers */
def596e9 331#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
6b06314c 332#define REQ_F_FIXED_FILE 4 /* ctx owns file */
31b51510 333#define REQ_F_SEQ_PREV 8 /* sequential with previous */
e2033e33
SB
334#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
335#define REQ_F_IO_DRAINED 32 /* drain done */
9e645e11 336#define REQ_F_LINK 64 /* linked sqes */
2665abfd 337#define REQ_F_LINK_TIMEOUT 128 /* has linked timeout */
f7b76ac9 338#define REQ_F_FAIL_LINK 256 /* fail rest of links */
4fe2c963 339#define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */
5262f567 340#define REQ_F_TIMEOUT 1024 /* timeout request */
491381ce
JA
341#define REQ_F_ISREG 2048 /* regular file */
342#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
fcb323cc 343#define REQ_F_INFLIGHT 8192 /* on inflight list */
7c9e7f0f 344#define REQ_F_COMP_LOCKED 16384 /* completion under lock */
2b188cc1 345 u64 user_data;
9e645e11 346 u32 result;
de0617e4 347 u32 sequence;
2b188cc1 348
fcb323cc
JA
349 struct list_head inflight_entry;
350
561fb04a 351 struct io_wq_work work;
2b188cc1
JA
352};
353
354#define IO_PLUG_THRESHOLD 2
def596e9 355#define IO_IOPOLL_BATCH 8
2b188cc1 356
9a56a232
JA
357struct io_submit_state {
358 struct blk_plug plug;
359
2579f913
JA
360 /*
361 * io_kiocb alloc cache
362 */
363 void *reqs[IO_IOPOLL_BATCH];
364 unsigned int free_reqs;
365 unsigned int cur_req;
366
9a56a232
JA
367 /*
368 * File reference cache
369 */
370 struct file *file;
371 unsigned int fd;
372 unsigned int has_refs;
373 unsigned int used_refs;
374 unsigned int ios_left;
375};
376
561fb04a 377static void io_wq_submit_work(struct io_wq_work **workptr);
78e19bbe 378static void io_cqring_fill_event(struct io_kiocb *req, long res);
4fe2c963 379static void __io_free_req(struct io_kiocb *req);
ec9c02ad 380static void io_put_req(struct io_kiocb *req);
78e19bbe 381static void io_double_put_req(struct io_kiocb *req);
de0617e4 382
2b188cc1
JA
383static struct kmem_cache *req_cachep;
384
385static const struct file_operations io_uring_fops;
386
387struct sock *io_uring_get_socket(struct file *file)
388{
389#if defined(CONFIG_UNIX)
390 if (file->f_op == &io_uring_fops) {
391 struct io_ring_ctx *ctx = file->private_data;
392
393 return ctx->ring_sock->sk;
394 }
395#endif
396 return NULL;
397}
398EXPORT_SYMBOL(io_uring_get_socket);
399
400static void io_ring_ctx_ref_free(struct percpu_ref *ref)
401{
402 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
403
206aefde 404 complete(&ctx->completions[0]);
2b188cc1
JA
405}
406
407static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
408{
409 struct io_ring_ctx *ctx;
410
411 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
412 if (!ctx)
413 return NULL;
414
0ddf92e8
JA
415 ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
416 if (!ctx->fallback_req)
417 goto err;
418
206aefde
JA
419 ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL);
420 if (!ctx->completions)
421 goto err;
422
21482896 423 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
206aefde
JA
424 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
425 goto err;
2b188cc1
JA
426
427 ctx->flags = p->flags;
428 init_waitqueue_head(&ctx->cq_wait);
1d7bb1d5 429 INIT_LIST_HEAD(&ctx->cq_overflow_list);
206aefde
JA
430 init_completion(&ctx->completions[0]);
431 init_completion(&ctx->completions[1]);
2b188cc1
JA
432 mutex_init(&ctx->uring_lock);
433 init_waitqueue_head(&ctx->wait);
434 spin_lock_init(&ctx->completion_lock);
def596e9 435 INIT_LIST_HEAD(&ctx->poll_list);
221c5eb2 436 INIT_LIST_HEAD(&ctx->cancel_list);
de0617e4 437 INIT_LIST_HEAD(&ctx->defer_list);
5262f567 438 INIT_LIST_HEAD(&ctx->timeout_list);
fcb323cc
JA
439 init_waitqueue_head(&ctx->inflight_wait);
440 spin_lock_init(&ctx->inflight_lock);
441 INIT_LIST_HEAD(&ctx->inflight_list);
2b188cc1 442 return ctx;
206aefde 443err:
0ddf92e8
JA
444 if (ctx->fallback_req)
445 kmem_cache_free(req_cachep, ctx->fallback_req);
206aefde
JA
446 kfree(ctx->completions);
447 kfree(ctx);
448 return NULL;
2b188cc1
JA
449}
450
a197f664 451static inline bool __io_sequence_defer(struct io_kiocb *req)
7adf4eaf 452{
a197f664
JL
453 struct io_ring_ctx *ctx = req->ctx;
454
498ccd9e
JA
455 return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
456 + atomic_read(&ctx->cached_cq_overflow);
7adf4eaf
JA
457}
458
a197f664 459static inline bool io_sequence_defer(struct io_kiocb *req)
de0617e4 460{
7adf4eaf 461 if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
de0617e4
JA
462 return false;
463
a197f664 464 return __io_sequence_defer(req);
de0617e4
JA
465}
466
7adf4eaf 467static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
de0617e4
JA
468{
469 struct io_kiocb *req;
470
7adf4eaf 471 req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
a197f664 472 if (req && !io_sequence_defer(req)) {
de0617e4
JA
473 list_del_init(&req->list);
474 return req;
475 }
476
477 return NULL;
478}
479
5262f567
JA
480static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
481{
7adf4eaf
JA
482 struct io_kiocb *req;
483
484 req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
a197f664 485 if (req && !__io_sequence_defer(req)) {
7adf4eaf
JA
486 list_del_init(&req->list);
487 return req;
488 }
489
490 return NULL;
5262f567
JA
491}
492
de0617e4 493static void __io_commit_cqring(struct io_ring_ctx *ctx)
2b188cc1 494{
75b28aff 495 struct io_rings *rings = ctx->rings;
2b188cc1 496
75b28aff 497 if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
2b188cc1 498 /* order cqe stores with ring update */
75b28aff 499 smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
2b188cc1 500
2b188cc1
JA
501 if (wq_has_sleeper(&ctx->cq_wait)) {
502 wake_up_interruptible(&ctx->cq_wait);
503 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
504 }
505 }
506}
507
561fb04a 508static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
18d9be1a 509{
561fb04a
JA
510 u8 opcode = READ_ONCE(sqe->opcode);
511
512 return !(opcode == IORING_OP_READ_FIXED ||
513 opcode == IORING_OP_WRITE_FIXED);
514}
515
516static inline bool io_prep_async_work(struct io_kiocb *req)
517{
518 bool do_hashed = false;
54a91f3b 519
6cc47d1d
JA
520 if (req->submit.sqe) {
521 switch (req->submit.sqe->opcode) {
522 case IORING_OP_WRITEV:
523 case IORING_OP_WRITE_FIXED:
561fb04a 524 do_hashed = true;
5f8fd2d3
JA
525 /* fall-through */
526 case IORING_OP_READV:
527 case IORING_OP_READ_FIXED:
528 case IORING_OP_SENDMSG:
529 case IORING_OP_RECVMSG:
530 case IORING_OP_ACCEPT:
531 case IORING_OP_POLL_ADD:
532 /*
533 * We know REQ_F_ISREG is not set on some of these
534 * opcodes, but this enables us to keep the check in
535 * just one place.
536 */
537 if (!(req->flags & REQ_F_ISREG))
538 req->work.flags |= IO_WQ_WORK_UNBOUND;
6cc47d1d
JA
539 break;
540 }
561fb04a
JA
541 if (io_sqe_needs_user(req->submit.sqe))
542 req->work.flags |= IO_WQ_WORK_NEEDS_USER;
54a91f3b
JA
543 }
544
561fb04a
JA
545 return do_hashed;
546}
547
a197f664 548static inline void io_queue_async_work(struct io_kiocb *req)
561fb04a
JA
549{
550 bool do_hashed = io_prep_async_work(req);
a197f664 551 struct io_ring_ctx *ctx = req->ctx;
561fb04a
JA
552
553 trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
554 req->flags);
555 if (!do_hashed) {
556 io_wq_enqueue(ctx->io_wq, &req->work);
557 } else {
558 io_wq_enqueue_hashed(ctx->io_wq, &req->work,
559 file_inode(req->file));
560 }
18d9be1a
JA
561}
562
5262f567
JA
563static void io_kill_timeout(struct io_kiocb *req)
564{
565 int ret;
566
567 ret = hrtimer_try_to_cancel(&req->timeout.timer);
568 if (ret != -1) {
569 atomic_inc(&req->ctx->cq_timeouts);
842f9612 570 list_del_init(&req->list);
78e19bbe 571 io_cqring_fill_event(req, 0);
ec9c02ad 572 io_put_req(req);
5262f567
JA
573 }
574}
575
576static void io_kill_timeouts(struct io_ring_ctx *ctx)
577{
578 struct io_kiocb *req, *tmp;
579
580 spin_lock_irq(&ctx->completion_lock);
581 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
582 io_kill_timeout(req);
583 spin_unlock_irq(&ctx->completion_lock);
584}
585
de0617e4
JA
586static void io_commit_cqring(struct io_ring_ctx *ctx)
587{
588 struct io_kiocb *req;
589
5262f567
JA
590 while ((req = io_get_timeout_req(ctx)) != NULL)
591 io_kill_timeout(req);
592
de0617e4
JA
593 __io_commit_cqring(ctx);
594
595 while ((req = io_get_deferred_req(ctx)) != NULL) {
4fe2c963
JL
596 if (req->flags & REQ_F_SHADOW_DRAIN) {
597 /* Just for drain, free it. */
598 __io_free_req(req);
599 continue;
600 }
de0617e4 601 req->flags |= REQ_F_IO_DRAINED;
a197f664 602 io_queue_async_work(req);
de0617e4
JA
603 }
604}
605
2b188cc1
JA
606static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
607{
75b28aff 608 struct io_rings *rings = ctx->rings;
2b188cc1
JA
609 unsigned tail;
610
611 tail = ctx->cached_cq_tail;
115e12e5
SB
612 /*
613 * writes to the cq entry need to come after reading head; the
614 * control dependency is enough as we're using WRITE_ONCE to
615 * fill the cq entry
616 */
75b28aff 617 if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
2b188cc1
JA
618 return NULL;
619
620 ctx->cached_cq_tail++;
75b28aff 621 return &rings->cqes[tail & ctx->cq_mask];
2b188cc1
JA
622}
623
1d7bb1d5
JA
624static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
625{
626 if (waitqueue_active(&ctx->wait))
627 wake_up(&ctx->wait);
628 if (waitqueue_active(&ctx->sqo_wait))
629 wake_up(&ctx->sqo_wait);
630 if (ctx->cq_ev_fd)
631 eventfd_signal(ctx->cq_ev_fd, 1);
632}
633
634static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
635{
636 struct io_rings *rings = ctx->rings;
637 struct io_uring_cqe *cqe;
638 struct io_kiocb *req;
639 unsigned long flags;
640 LIST_HEAD(list);
641
642 if (!force) {
643 if (list_empty_careful(&ctx->cq_overflow_list))
644 return;
645 if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
646 rings->cq_ring_entries))
647 return;
648 }
649
650 spin_lock_irqsave(&ctx->completion_lock, flags);
651
652 /* if force is set, the ring is going away. always drop after that */
653 if (force)
654 ctx->cq_overflow_flushed = true;
655
656 while (!list_empty(&ctx->cq_overflow_list)) {
657 cqe = io_get_cqring(ctx);
658 if (!cqe && !force)
659 break;
660
661 req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
662 list);
663 list_move(&req->list, &list);
664 if (cqe) {
665 WRITE_ONCE(cqe->user_data, req->user_data);
666 WRITE_ONCE(cqe->res, req->result);
667 WRITE_ONCE(cqe->flags, 0);
668 } else {
669 WRITE_ONCE(ctx->rings->cq_overflow,
670 atomic_inc_return(&ctx->cached_cq_overflow));
671 }
672 }
673
674 io_commit_cqring(ctx);
675 spin_unlock_irqrestore(&ctx->completion_lock, flags);
676 io_cqring_ev_posted(ctx);
677
678 while (!list_empty(&list)) {
679 req = list_first_entry(&list, struct io_kiocb, list);
680 list_del(&req->list);
ec9c02ad 681 io_put_req(req);
1d7bb1d5
JA
682 }
683}
684
78e19bbe 685static void io_cqring_fill_event(struct io_kiocb *req, long res)
2b188cc1 686{
78e19bbe 687 struct io_ring_ctx *ctx = req->ctx;
2b188cc1
JA
688 struct io_uring_cqe *cqe;
689
78e19bbe 690 trace_io_uring_complete(ctx, req->user_data, res);
51c3ff62 691
2b188cc1
JA
692 /*
693 * If we can't get a cq entry, userspace overflowed the
694 * submission (by quite a lot). Increment the overflow count in
695 * the ring.
696 */
697 cqe = io_get_cqring(ctx);
1d7bb1d5 698 if (likely(cqe)) {
78e19bbe 699 WRITE_ONCE(cqe->user_data, req->user_data);
2b188cc1 700 WRITE_ONCE(cqe->res, res);
c71ffb67 701 WRITE_ONCE(cqe->flags, 0);
1d7bb1d5 702 } else if (ctx->cq_overflow_flushed) {
498ccd9e
JA
703 WRITE_ONCE(ctx->rings->cq_overflow,
704 atomic_inc_return(&ctx->cached_cq_overflow));
1d7bb1d5
JA
705 } else {
706 refcount_inc(&req->refs);
707 req->result = res;
708 list_add_tail(&req->list, &ctx->cq_overflow_list);
2b188cc1
JA
709 }
710}
711
78e19bbe 712static void io_cqring_add_event(struct io_kiocb *req, long res)
2b188cc1 713{
78e19bbe 714 struct io_ring_ctx *ctx = req->ctx;
2b188cc1
JA
715 unsigned long flags;
716
717 spin_lock_irqsave(&ctx->completion_lock, flags);
78e19bbe 718 io_cqring_fill_event(req, res);
2b188cc1
JA
719 io_commit_cqring(ctx);
720 spin_unlock_irqrestore(&ctx->completion_lock, flags);
721
8c838788 722 io_cqring_ev_posted(ctx);
2b188cc1
JA
723}
724
0ddf92e8
JA
725static inline bool io_is_fallback_req(struct io_kiocb *req)
726{
727 return req == (struct io_kiocb *)
728 ((unsigned long) req->ctx->fallback_req & ~1UL);
729}
730
731static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
732{
733 struct io_kiocb *req;
734
735 req = ctx->fallback_req;
736 if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
737 return req;
738
739 return NULL;
740}
741
2579f913
JA
742static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
743 struct io_submit_state *state)
2b188cc1 744{
fd6fab2c 745 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
2b188cc1
JA
746 struct io_kiocb *req;
747
748 if (!percpu_ref_tryget(&ctx->refs))
749 return NULL;
750
2579f913 751 if (!state) {
fd6fab2c 752 req = kmem_cache_alloc(req_cachep, gfp);
2579f913 753 if (unlikely(!req))
0ddf92e8 754 goto fallback;
2579f913
JA
755 } else if (!state->free_reqs) {
756 size_t sz;
757 int ret;
758
759 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
fd6fab2c
JA
760 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
761
762 /*
763 * Bulk alloc is all-or-nothing. If we fail to get a batch,
764 * retry single alloc to be on the safe side.
765 */
766 if (unlikely(ret <= 0)) {
767 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
768 if (!state->reqs[0])
0ddf92e8 769 goto fallback;
fd6fab2c
JA
770 ret = 1;
771 }
2579f913
JA
772 state->free_reqs = ret - 1;
773 state->cur_req = 1;
774 req = state->reqs[0];
775 } else {
776 req = state->reqs[state->cur_req];
777 state->free_reqs--;
778 state->cur_req++;
2b188cc1
JA
779 }
780
0ddf92e8 781got_it:
60c112b0 782 req->file = NULL;
2579f913
JA
783 req->ctx = ctx;
784 req->flags = 0;
e65ef56d
JA
785 /* one is dropped after submission, the other at completion */
786 refcount_set(&req->refs, 2);
9e645e11 787 req->result = 0;
561fb04a 788 INIT_IO_WORK(&req->work, io_wq_submit_work);
2579f913 789 return req;
0ddf92e8
JA
790fallback:
791 req = io_get_fallback_req(ctx);
792 if (req)
793 goto got_it;
6805b32e 794 percpu_ref_put(&ctx->refs);
2b188cc1
JA
795 return NULL;
796}
797
def596e9
JA
798static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
799{
800 if (*nr) {
801 kmem_cache_free_bulk(req_cachep, *nr, reqs);
6805b32e 802 percpu_ref_put_many(&ctx->refs, *nr);
def596e9
JA
803 *nr = 0;
804 }
805}
806
9e645e11 807static void __io_free_req(struct io_kiocb *req)
2b188cc1 808{
fcb323cc
JA
809 struct io_ring_ctx *ctx = req->ctx;
810
09bb8394
JA
811 if (req->file && !(req->flags & REQ_F_FIXED_FILE))
812 fput(req->file);
fcb323cc
JA
813 if (req->flags & REQ_F_INFLIGHT) {
814 unsigned long flags;
815
816 spin_lock_irqsave(&ctx->inflight_lock, flags);
817 list_del(&req->inflight_entry);
818 if (waitqueue_active(&ctx->inflight_wait))
819 wake_up(&ctx->inflight_wait);
820 spin_unlock_irqrestore(&ctx->inflight_lock, flags);
821 }
822 percpu_ref_put(&ctx->refs);
0ddf92e8
JA
823 if (likely(!io_is_fallback_req(req)))
824 kmem_cache_free(req_cachep, req);
825 else
826 clear_bit_unlock(0, (unsigned long *) ctx->fallback_req);
e65ef56d
JA
827}
828
a197f664 829static bool io_link_cancel_timeout(struct io_kiocb *req)
2665abfd 830{
a197f664 831 struct io_ring_ctx *ctx = req->ctx;
2665abfd
JA
832 int ret;
833
834 ret = hrtimer_try_to_cancel(&req->timeout.timer);
835 if (ret != -1) {
78e19bbe 836 io_cqring_fill_event(req, -ECANCELED);
2665abfd
JA
837 io_commit_cqring(ctx);
838 req->flags &= ~REQ_F_LINK;
ec9c02ad 839 io_put_req(req);
2665abfd
JA
840 return true;
841 }
842
843 return false;
844}
845
ba816ad6 846static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
9e645e11 847{
2665abfd 848 struct io_ring_ctx *ctx = req->ctx;
9e645e11 849 struct io_kiocb *nxt;
2665abfd 850 bool wake_ev = false;
9e645e11
JA
851
852 /*
853 * The list should never be empty when we are called here. But could
854 * potentially happen if the chain is messed up, check to be on the
855 * safe side.
856 */
857 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
2665abfd 858 while (nxt) {
76a46e06 859 list_del_init(&nxt->list);
9e645e11
JA
860 if (!list_empty(&req->link_list)) {
861 INIT_LIST_HEAD(&nxt->link_list);
862 list_splice(&req->link_list, &nxt->link_list);
863 nxt->flags |= REQ_F_LINK;
864 }
865
ba816ad6
JA
866 /*
867 * If we're in async work, we can continue processing the chain
868 * in this context instead of having to queue up new async work.
869 */
2665abfd 870 if (req->flags & REQ_F_LINK_TIMEOUT) {
a197f664 871 wake_ev = io_link_cancel_timeout(nxt);
2665abfd
JA
872
873 /* we dropped this link, get next */
874 nxt = list_first_entry_or_null(&req->link_list,
875 struct io_kiocb, list);
960e432d 876 } else if (nxtptr && io_wq_current_is_worker()) {
ba816ad6 877 *nxtptr = nxt;
2665abfd
JA
878 break;
879 } else {
a197f664 880 io_queue_async_work(nxt);
2665abfd
JA
881 break;
882 }
9e645e11 883 }
2665abfd
JA
884
885 if (wake_ev)
886 io_cqring_ev_posted(ctx);
9e645e11
JA
887}
888
889/*
890 * Called if REQ_F_LINK is set, and we fail the head request
891 */
892static void io_fail_links(struct io_kiocb *req)
893{
2665abfd 894 struct io_ring_ctx *ctx = req->ctx;
9e645e11 895 struct io_kiocb *link;
2665abfd
JA
896 unsigned long flags;
897
898 spin_lock_irqsave(&ctx->completion_lock, flags);
9e645e11
JA
899
900 while (!list_empty(&req->link_list)) {
901 link = list_first_entry(&req->link_list, struct io_kiocb, list);
2665abfd 902 list_del_init(&link->list);
9e645e11 903
c826bd7a 904 trace_io_uring_fail_link(req, link);
2665abfd
JA
905
906 if ((req->flags & REQ_F_LINK_TIMEOUT) &&
907 link->submit.sqe->opcode == IORING_OP_LINK_TIMEOUT) {
a197f664 908 io_link_cancel_timeout(link);
2665abfd 909 } else {
78e19bbe
JA
910 io_cqring_fill_event(link, -ECANCELED);
911 io_double_put_req(link);
2665abfd 912 }
9e645e11 913 }
2665abfd
JA
914
915 io_commit_cqring(ctx);
916 spin_unlock_irqrestore(&ctx->completion_lock, flags);
917 io_cqring_ev_posted(ctx);
9e645e11
JA
918}
919
c69f8dbe 920static void io_free_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
9e645e11 921{
2665abfd
JA
922 if (likely(!(req->flags & REQ_F_LINK))) {
923 __io_free_req(req);
924 return;
925 }
926
9e645e11
JA
927 /*
928 * If LINK is set, we have dependent requests in this chain. If we
929 * didn't fail this request, queue the first one up, moving any other
930 * dependencies to the next request. In case of failure, fail the rest
931 * of the chain.
932 */
2665abfd
JA
933 if (req->flags & REQ_F_FAIL_LINK) {
934 io_fail_links(req);
7c9e7f0f
JA
935 } else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
936 REQ_F_LINK_TIMEOUT) {
2665abfd
JA
937 struct io_ring_ctx *ctx = req->ctx;
938 unsigned long flags;
939
940 /*
941 * If this is a timeout link, we could be racing with the
942 * timeout timer. Grab the completion lock for this case to
7c9e7f0f 943 * protect against that.
2665abfd
JA
944 */
945 spin_lock_irqsave(&ctx->completion_lock, flags);
946 io_req_link_next(req, nxt);
947 spin_unlock_irqrestore(&ctx->completion_lock, flags);
948 } else {
949 io_req_link_next(req, nxt);
9e645e11
JA
950 }
951
952 __io_free_req(req);
953}
954
c69f8dbe
JL
955static void io_free_req(struct io_kiocb *req)
956{
957 io_free_req_find_next(req, NULL);
958}
959
ba816ad6
JA
960/*
961 * Drop reference to request, return next in chain (if there is one) if this
962 * was the last reference to this request.
963 */
ec9c02ad 964static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
e65ef56d 965{
ba816ad6
JA
966 struct io_kiocb *nxt = NULL;
967
e65ef56d 968 if (refcount_dec_and_test(&req->refs))
c69f8dbe 969 io_free_req_find_next(req, &nxt);
ba816ad6 970
ba816ad6 971 if (nxt) {
561fb04a 972 if (nxtptr)
ba816ad6 973 *nxtptr = nxt;
561fb04a 974 else
a197f664 975 io_queue_async_work(nxt);
ba816ad6 976 }
2b188cc1
JA
977}
978
ec9c02ad
JL
979static void io_put_req(struct io_kiocb *req)
980{
981 if (refcount_dec_and_test(&req->refs))
c69f8dbe 982 io_free_req(req);
ec9c02ad
JL
983}
984
78e19bbe
JA
985static void io_double_put_req(struct io_kiocb *req)
986{
987 /* drop both submit and complete references */
988 if (refcount_sub_and_test(2, &req->refs))
989 __io_free_req(req);
990}
991
1d7bb1d5 992static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
a3a0e43f 993{
84f97dc2
JA
994 struct io_rings *rings = ctx->rings;
995
1d7bb1d5
JA
996 /*
997 * noflush == true is from the waitqueue handler, just ensure we wake
998 * up the task, and the next invocation will flush the entries. We
999 * cannot safely to it from here.
1000 */
1001 if (noflush && !list_empty(&ctx->cq_overflow_list))
1002 return -1U;
1003
1004 io_cqring_overflow_flush(ctx, false);
1005
a3a0e43f
JA
1006 /* See comment at the top of this file */
1007 smp_rmb();
75b28aff 1008 return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
a3a0e43f
JA
1009}
1010
fb5ccc98
PB
1011static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
1012{
1013 struct io_rings *rings = ctx->rings;
1014
1015 /* make sure SQ entry isn't read before tail */
1016 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
1017}
1018
def596e9
JA
1019/*
1020 * Find and free completed poll iocbs
1021 */
1022static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
1023 struct list_head *done)
1024{
1025 void *reqs[IO_IOPOLL_BATCH];
1026 struct io_kiocb *req;
09bb8394 1027 int to_free;
def596e9 1028
09bb8394 1029 to_free = 0;
def596e9
JA
1030 while (!list_empty(done)) {
1031 req = list_first_entry(done, struct io_kiocb, list);
1032 list_del(&req->list);
1033
78e19bbe 1034 io_cqring_fill_event(req, req->result);
def596e9
JA
1035 (*nr_events)++;
1036
09bb8394
JA
1037 if (refcount_dec_and_test(&req->refs)) {
1038 /* If we're not using fixed files, we have to pair the
1039 * completion part with the file put. Use regular
1040 * completions for those, only batch free for fixed
9e645e11 1041 * file and non-linked commands.
09bb8394 1042 */
0ddf92e8
JA
1043 if (((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
1044 REQ_F_FIXED_FILE) && !io_is_fallback_req(req)) {
09bb8394
JA
1045 reqs[to_free++] = req;
1046 if (to_free == ARRAY_SIZE(reqs))
1047 io_free_req_many(ctx, reqs, &to_free);
6b06314c 1048 } else {
c69f8dbe 1049 io_free_req(req);
6b06314c 1050 }
9a56a232 1051 }
def596e9 1052 }
def596e9 1053
09bb8394 1054 io_commit_cqring(ctx);
def596e9
JA
1055 io_free_req_many(ctx, reqs, &to_free);
1056}
1057
1058static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
1059 long min)
1060{
1061 struct io_kiocb *req, *tmp;
1062 LIST_HEAD(done);
1063 bool spin;
1064 int ret;
1065
1066 /*
1067 * Only spin for completions if we don't have multiple devices hanging
1068 * off our complete list, and we're under the requested amount.
1069 */
1070 spin = !ctx->poll_multi_file && *nr_events < min;
1071
1072 ret = 0;
1073 list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
1074 struct kiocb *kiocb = &req->rw;
1075
1076 /*
1077 * Move completed entries to our local list. If we find a
1078 * request that requires polling, break out and complete
1079 * the done list first, if we have entries there.
1080 */
1081 if (req->flags & REQ_F_IOPOLL_COMPLETED) {
1082 list_move_tail(&req->list, &done);
1083 continue;
1084 }
1085 if (!list_empty(&done))
1086 break;
1087
1088 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
1089 if (ret < 0)
1090 break;
1091
1092 if (ret && spin)
1093 spin = false;
1094 ret = 0;
1095 }
1096
1097 if (!list_empty(&done))
1098 io_iopoll_complete(ctx, nr_events, &done);
1099
1100 return ret;
1101}
1102
1103/*
1104 * Poll for a mininum of 'min' events. Note that if min == 0 we consider that a
1105 * non-spinning poll check - we'll still enter the driver poll loop, but only
1106 * as a non-spinning completion check.
1107 */
1108static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
1109 long min)
1110{
08f5439f 1111 while (!list_empty(&ctx->poll_list) && !need_resched()) {
def596e9
JA
1112 int ret;
1113
1114 ret = io_do_iopoll(ctx, nr_events, min);
1115 if (ret < 0)
1116 return ret;
1117 if (!min || *nr_events >= min)
1118 return 0;
1119 }
1120
1121 return 1;
1122}
1123
1124/*
1125 * We can't just wait for polled events to come to us, we have to actively
1126 * find and complete them.
1127 */
1128static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
1129{
1130 if (!(ctx->flags & IORING_SETUP_IOPOLL))
1131 return;
1132
1133 mutex_lock(&ctx->uring_lock);
1134 while (!list_empty(&ctx->poll_list)) {
1135 unsigned int nr_events = 0;
1136
1137 io_iopoll_getevents(ctx, &nr_events, 1);
08f5439f
JA
1138
1139 /*
1140 * Ensure we allow local-to-the-cpu processing to take place,
1141 * in this case we need to ensure that we reap all events.
1142 */
1143 cond_resched();
def596e9
JA
1144 }
1145 mutex_unlock(&ctx->uring_lock);
1146}
1147
2b2ed975
JA
1148static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
1149 long min)
def596e9 1150{
2b2ed975 1151 int iters = 0, ret = 0;
500f9fba 1152
def596e9
JA
1153 do {
1154 int tmin = 0;
1155
a3a0e43f
JA
1156 /*
1157 * Don't enter poll loop if we already have events pending.
1158 * If we do, we can potentially be spinning for commands that
1159 * already triggered a CQE (eg in error).
1160 */
1d7bb1d5 1161 if (io_cqring_events(ctx, false))
a3a0e43f
JA
1162 break;
1163
500f9fba
JA
1164 /*
1165 * If a submit got punted to a workqueue, we can have the
1166 * application entering polling for a command before it gets
1167 * issued. That app will hold the uring_lock for the duration
1168 * of the poll right here, so we need to take a breather every
1169 * now and then to ensure that the issue has a chance to add
1170 * the poll to the issued list. Otherwise we can spin here
1171 * forever, while the workqueue is stuck trying to acquire the
1172 * very same mutex.
1173 */
1174 if (!(++iters & 7)) {
1175 mutex_unlock(&ctx->uring_lock);
1176 mutex_lock(&ctx->uring_lock);
1177 }
1178
def596e9
JA
1179 if (*nr_events < min)
1180 tmin = min - *nr_events;
1181
1182 ret = io_iopoll_getevents(ctx, nr_events, tmin);
1183 if (ret <= 0)
1184 break;
1185 ret = 0;
1186 } while (min && !*nr_events && !need_resched());
1187
2b2ed975
JA
1188 return ret;
1189}
1190
1191static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
1192 long min)
1193{
1194 int ret;
1195
1196 /*
1197 * We disallow the app entering submit/complete with polling, but we
1198 * still need to lock the ring to prevent racing with polled issue
1199 * that got punted to a workqueue.
1200 */
1201 mutex_lock(&ctx->uring_lock);
1202 ret = __io_iopoll_check(ctx, nr_events, min);
500f9fba 1203 mutex_unlock(&ctx->uring_lock);
def596e9
JA
1204 return ret;
1205}
1206
491381ce 1207static void kiocb_end_write(struct io_kiocb *req)
2b188cc1 1208{
491381ce
JA
1209 /*
1210 * Tell lockdep we inherited freeze protection from submission
1211 * thread.
1212 */
1213 if (req->flags & REQ_F_ISREG) {
1214 struct inode *inode = file_inode(req->file);
2b188cc1 1215
491381ce 1216 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
2b188cc1 1217 }
491381ce 1218 file_end_write(req->file);
2b188cc1
JA
1219}
1220
ba816ad6 1221static void io_complete_rw_common(struct kiocb *kiocb, long res)
2b188cc1
JA
1222{
1223 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
1224
491381ce
JA
1225 if (kiocb->ki_flags & IOCB_WRITE)
1226 kiocb_end_write(req);
2b188cc1 1227
9e645e11
JA
1228 if ((req->flags & REQ_F_LINK) && res != req->result)
1229 req->flags |= REQ_F_FAIL_LINK;
78e19bbe 1230 io_cqring_add_event(req, res);
ba816ad6
JA
1231}
1232
1233static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
1234{
1235 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
1236
1237 io_complete_rw_common(kiocb, res);
ec9c02ad 1238 io_put_req(req);
ba816ad6
JA
1239}
1240
1241static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
1242{
1243 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
ec9c02ad 1244 struct io_kiocb *nxt = NULL;
ba816ad6
JA
1245
1246 io_complete_rw_common(kiocb, res);
ec9c02ad
JL
1247 io_put_req_find_next(req, &nxt);
1248
1249 return nxt;
2b188cc1
JA
1250}
1251
def596e9
JA
1252static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
1253{
1254 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
1255
491381ce
JA
1256 if (kiocb->ki_flags & IOCB_WRITE)
1257 kiocb_end_write(req);
def596e9 1258
9e645e11
JA
1259 if ((req->flags & REQ_F_LINK) && res != req->result)
1260 req->flags |= REQ_F_FAIL_LINK;
1261 req->result = res;
def596e9
JA
1262 if (res != -EAGAIN)
1263 req->flags |= REQ_F_IOPOLL_COMPLETED;
1264}
1265
1266/*
1267 * After the iocb has been issued, it's safe to be found on the poll list.
1268 * Adding the kiocb to the list AFTER submission ensures that we don't
1269 * find it from a io_iopoll_getevents() thread before the issuer is done
1270 * accessing the kiocb cookie.
1271 */
1272static void io_iopoll_req_issued(struct io_kiocb *req)
1273{
1274 struct io_ring_ctx *ctx = req->ctx;
1275
1276 /*
1277 * Track whether we have multiple files in our lists. This will impact
1278 * how we do polling eventually, not spinning if we're on potentially
1279 * different devices.
1280 */
1281 if (list_empty(&ctx->poll_list)) {
1282 ctx->poll_multi_file = false;
1283 } else if (!ctx->poll_multi_file) {
1284 struct io_kiocb *list_req;
1285
1286 list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
1287 list);
1288 if (list_req->rw.ki_filp != req->rw.ki_filp)
1289 ctx->poll_multi_file = true;
1290 }
1291
1292 /*
1293 * For fast devices, IO may have already completed. If it has, add
1294 * it to the front so we find it first.
1295 */
1296 if (req->flags & REQ_F_IOPOLL_COMPLETED)
1297 list_add(&req->list, &ctx->poll_list);
1298 else
1299 list_add_tail(&req->list, &ctx->poll_list);
1300}
1301
3d6770fb 1302static void io_file_put(struct io_submit_state *state)
9a56a232 1303{
3d6770fb 1304 if (state->file) {
9a56a232
JA
1305 int diff = state->has_refs - state->used_refs;
1306
1307 if (diff)
1308 fput_many(state->file, diff);
1309 state->file = NULL;
1310 }
1311}
1312
1313/*
1314 * Get as many references to a file as we have IOs left in this submission,
1315 * assuming most submissions are for one file, or at least that each file
1316 * has more than one submission.
1317 */
1318static struct file *io_file_get(struct io_submit_state *state, int fd)
1319{
1320 if (!state)
1321 return fget(fd);
1322
1323 if (state->file) {
1324 if (state->fd == fd) {
1325 state->used_refs++;
1326 state->ios_left--;
1327 return state->file;
1328 }
3d6770fb 1329 io_file_put(state);
9a56a232
JA
1330 }
1331 state->file = fget_many(fd, state->ios_left);
1332 if (!state->file)
1333 return NULL;
1334
1335 state->fd = fd;
1336 state->has_refs = state->ios_left;
1337 state->used_refs = 1;
1338 state->ios_left--;
1339 return state->file;
1340}
1341
2b188cc1
JA
1342/*
1343 * If we tracked the file through the SCM inflight mechanism, we could support
1344 * any file. For now, just ensure that anything potentially problematic is done
1345 * inline.
1346 */
1347static bool io_file_supports_async(struct file *file)
1348{
1349 umode_t mode = file_inode(file)->i_mode;
1350
1351 if (S_ISBLK(mode) || S_ISCHR(mode))
1352 return true;
1353 if (S_ISREG(mode) && file->f_op != &io_uring_fops)
1354 return true;
1355
1356 return false;
1357}
1358
267bc904 1359static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
2b188cc1 1360{
267bc904 1361 const struct io_uring_sqe *sqe = req->submit.sqe;
def596e9 1362 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 1363 struct kiocb *kiocb = &req->rw;
09bb8394
JA
1364 unsigned ioprio;
1365 int ret;
2b188cc1 1366
09bb8394
JA
1367 if (!req->file)
1368 return -EBADF;
2b188cc1 1369
491381ce
JA
1370 if (S_ISREG(file_inode(req->file)->i_mode))
1371 req->flags |= REQ_F_ISREG;
1372
1373 /*
1374 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
1375 * we know to async punt it even if it was opened O_NONBLOCK
1376 */
1377 if (force_nonblock && !io_file_supports_async(req->file)) {
1378 req->flags |= REQ_F_MUST_PUNT;
1379 return -EAGAIN;
1380 }
6b06314c 1381
2b188cc1
JA
1382 kiocb->ki_pos = READ_ONCE(sqe->off);
1383 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
1384 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
1385
1386 ioprio = READ_ONCE(sqe->ioprio);
1387 if (ioprio) {
1388 ret = ioprio_check_cap(ioprio);
1389 if (ret)
09bb8394 1390 return ret;
2b188cc1
JA
1391
1392 kiocb->ki_ioprio = ioprio;
1393 } else
1394 kiocb->ki_ioprio = get_current_ioprio();
1395
1396 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
1397 if (unlikely(ret))
09bb8394 1398 return ret;
8449eeda
SB
1399
1400 /* don't allow async punt if RWF_NOWAIT was requested */
491381ce
JA
1401 if ((kiocb->ki_flags & IOCB_NOWAIT) ||
1402 (req->file->f_flags & O_NONBLOCK))
8449eeda
SB
1403 req->flags |= REQ_F_NOWAIT;
1404
1405 if (force_nonblock)
2b188cc1 1406 kiocb->ki_flags |= IOCB_NOWAIT;
8449eeda 1407
def596e9 1408 if (ctx->flags & IORING_SETUP_IOPOLL) {
def596e9
JA
1409 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
1410 !kiocb->ki_filp->f_op->iopoll)
09bb8394 1411 return -EOPNOTSUPP;
2b188cc1 1412
def596e9
JA
1413 kiocb->ki_flags |= IOCB_HIPRI;
1414 kiocb->ki_complete = io_complete_rw_iopoll;
1415 } else {
09bb8394
JA
1416 if (kiocb->ki_flags & IOCB_HIPRI)
1417 return -EINVAL;
def596e9
JA
1418 kiocb->ki_complete = io_complete_rw;
1419 }
2b188cc1 1420 return 0;
2b188cc1
JA
1421}
1422
1423static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
1424{
1425 switch (ret) {
1426 case -EIOCBQUEUED:
1427 break;
1428 case -ERESTARTSYS:
1429 case -ERESTARTNOINTR:
1430 case -ERESTARTNOHAND:
1431 case -ERESTART_RESTARTBLOCK:
1432 /*
1433 * We can't just restart the syscall, since previously
1434 * submitted sqes may already be in progress. Just fail this
1435 * IO with EINTR.
1436 */
1437 ret = -EINTR;
1438 /* fall through */
1439 default:
1440 kiocb->ki_complete(kiocb, ret, 0);
1441 }
1442}
1443
ba816ad6
JA
1444static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
1445 bool in_async)
1446{
1447 if (in_async && ret >= 0 && nxt && kiocb->ki_complete == io_complete_rw)
1448 *nxt = __io_complete_rw(kiocb, ret);
1449 else
1450 io_rw_done(kiocb, ret);
1451}
1452
edafccee
JA
1453static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
1454 const struct io_uring_sqe *sqe,
1455 struct iov_iter *iter)
1456{
1457 size_t len = READ_ONCE(sqe->len);
1458 struct io_mapped_ubuf *imu;
1459 unsigned index, buf_index;
1460 size_t offset;
1461 u64 buf_addr;
1462
1463 /* attempt to use fixed buffers without having provided iovecs */
1464 if (unlikely(!ctx->user_bufs))
1465 return -EFAULT;
1466
1467 buf_index = READ_ONCE(sqe->buf_index);
1468 if (unlikely(buf_index >= ctx->nr_user_bufs))
1469 return -EFAULT;
1470
1471 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
1472 imu = &ctx->user_bufs[index];
1473 buf_addr = READ_ONCE(sqe->addr);
1474
1475 /* overflow */
1476 if (buf_addr + len < buf_addr)
1477 return -EFAULT;
1478 /* not inside the mapped region */
1479 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
1480 return -EFAULT;
1481
1482 /*
1483 * May not be a start of buffer, set size appropriately
1484 * and advance us to the beginning.
1485 */
1486 offset = buf_addr - imu->ubuf;
1487 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bd11b3a3
JA
1488
1489 if (offset) {
1490 /*
1491 * Don't use iov_iter_advance() here, as it's really slow for
1492 * using the latter parts of a big fixed buffer - it iterates
1493 * over each segment manually. We can cheat a bit here, because
1494 * we know that:
1495 *
1496 * 1) it's a BVEC iter, we set it up
1497 * 2) all bvecs are PAGE_SIZE in size, except potentially the
1498 * first and last bvec
1499 *
1500 * So just find our index, and adjust the iterator afterwards.
1501 * If the offset is within the first bvec (or the whole first
1502 * bvec, just use iov_iter_advance(). This makes it easier
1503 * since we can just skip the first segment, which may not
1504 * be PAGE_SIZE aligned.
1505 */
1506 const struct bio_vec *bvec = imu->bvec;
1507
1508 if (offset <= bvec->bv_len) {
1509 iov_iter_advance(iter, offset);
1510 } else {
1511 unsigned long seg_skip;
1512
1513 /* skip first vec */
1514 offset -= bvec->bv_len;
1515 seg_skip = 1 + (offset >> PAGE_SHIFT);
1516
1517 iter->bvec = bvec + seg_skip;
1518 iter->nr_segs -= seg_skip;
99c79f66 1519 iter->count -= bvec->bv_len + offset;
bd11b3a3 1520 iter->iov_offset = offset & ~PAGE_MASK;
bd11b3a3
JA
1521 }
1522 }
1523
edafccee
JA
1524 return 0;
1525}
1526
87e5e6da
JA
1527static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
1528 const struct sqe_submit *s, struct iovec **iovec,
1529 struct iov_iter *iter)
2b188cc1
JA
1530{
1531 const struct io_uring_sqe *sqe = s->sqe;
1532 void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1533 size_t sqe_len = READ_ONCE(sqe->len);
edafccee
JA
1534 u8 opcode;
1535
1536 /*
1537 * We're reading ->opcode for the second time, but the first read
1538 * doesn't care whether it's _FIXED or not, so it doesn't matter
1539 * whether ->opcode changes concurrently. The first read does care
1540 * about whether it is a READ or a WRITE, so we don't trust this read
1541 * for that purpose and instead let the caller pass in the read/write
1542 * flag.
1543 */
1544 opcode = READ_ONCE(sqe->opcode);
1545 if (opcode == IORING_OP_READ_FIXED ||
1546 opcode == IORING_OP_WRITE_FIXED) {
87e5e6da 1547 ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
edafccee
JA
1548 *iovec = NULL;
1549 return ret;
1550 }
2b188cc1
JA
1551
1552 if (!s->has_user)
1553 return -EFAULT;
1554
1555#ifdef CONFIG_COMPAT
1556 if (ctx->compat)
1557 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
1558 iovec, iter);
1559#endif
1560
1561 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
1562}
1563
32960613
JA
1564/*
1565 * For files that don't have ->read_iter() and ->write_iter(), handle them
1566 * by looping over ->read() or ->write() manually.
1567 */
1568static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
1569 struct iov_iter *iter)
1570{
1571 ssize_t ret = 0;
1572
1573 /*
1574 * Don't support polled IO through this interface, and we can't
1575 * support non-blocking either. For the latter, this just causes
1576 * the kiocb to be handled from an async context.
1577 */
1578 if (kiocb->ki_flags & IOCB_HIPRI)
1579 return -EOPNOTSUPP;
1580 if (kiocb->ki_flags & IOCB_NOWAIT)
1581 return -EAGAIN;
1582
1583 while (iov_iter_count(iter)) {
1584 struct iovec iovec = iov_iter_iovec(iter);
1585 ssize_t nr;
1586
1587 if (rw == READ) {
1588 nr = file->f_op->read(file, iovec.iov_base,
1589 iovec.iov_len, &kiocb->ki_pos);
1590 } else {
1591 nr = file->f_op->write(file, iovec.iov_base,
1592 iovec.iov_len, &kiocb->ki_pos);
1593 }
1594
1595 if (nr < 0) {
1596 if (!ret)
1597 ret = nr;
1598 break;
1599 }
1600 ret += nr;
1601 if (nr != iovec.iov_len)
1602 break;
1603 iov_iter_advance(iter, nr);
1604 }
1605
1606 return ret;
1607}
1608
267bc904
PB
1609static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
1610 bool force_nonblock)
2b188cc1
JA
1611{
1612 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1613 struct kiocb *kiocb = &req->rw;
1614 struct iov_iter iter;
1615 struct file *file;
31b51510 1616 size_t iov_count;
9d93a3f5 1617 ssize_t read_size, ret;
2b188cc1 1618
267bc904 1619 ret = io_prep_rw(req, force_nonblock);
2b188cc1
JA
1620 if (ret)
1621 return ret;
1622 file = kiocb->ki_filp;
1623
2b188cc1 1624 if (unlikely(!(file->f_mode & FMODE_READ)))
09bb8394 1625 return -EBADF;
2b188cc1 1626
267bc904 1627 ret = io_import_iovec(req->ctx, READ, &req->submit, &iovec, &iter);
87e5e6da 1628 if (ret < 0)
09bb8394 1629 return ret;
2b188cc1 1630
9d93a3f5 1631 read_size = ret;
9e645e11
JA
1632 if (req->flags & REQ_F_LINK)
1633 req->result = read_size;
1634
31b51510
JA
1635 iov_count = iov_iter_count(&iter);
1636 ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
2b188cc1
JA
1637 if (!ret) {
1638 ssize_t ret2;
1639
32960613
JA
1640 if (file->f_op->read_iter)
1641 ret2 = call_read_iter(file, kiocb, &iter);
1642 else
1643 ret2 = loop_rw_iter(READ, file, kiocb, &iter);
1644
9d93a3f5
JA
1645 /*
1646 * In case of a short read, punt to async. This can happen
1647 * if we have data partially cached. Alternatively we can
1648 * return the short read, in which case the application will
1649 * need to issue another SQE and wait for it. That SQE will
1650 * need async punt anyway, so it's more efficient to do it
1651 * here.
1652 */
491381ce
JA
1653 if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
1654 (req->flags & REQ_F_ISREG) &&
1655 ret2 > 0 && ret2 < read_size)
9d93a3f5
JA
1656 ret2 = -EAGAIN;
1657 /* Catch -EAGAIN return for forced non-blocking submission */
561fb04a 1658 if (!force_nonblock || ret2 != -EAGAIN)
267bc904 1659 kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
561fb04a 1660 else
2b188cc1
JA
1661 ret = -EAGAIN;
1662 }
1663 kfree(iovec);
2b188cc1
JA
1664 return ret;
1665}
1666
267bc904
PB
1667static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
1668 bool force_nonblock)
2b188cc1
JA
1669{
1670 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1671 struct kiocb *kiocb = &req->rw;
1672 struct iov_iter iter;
1673 struct file *file;
31b51510 1674 size_t iov_count;
87e5e6da 1675 ssize_t ret;
2b188cc1 1676
267bc904 1677 ret = io_prep_rw(req, force_nonblock);
2b188cc1
JA
1678 if (ret)
1679 return ret;
2b188cc1 1680
2b188cc1
JA
1681 file = kiocb->ki_filp;
1682 if (unlikely(!(file->f_mode & FMODE_WRITE)))
09bb8394 1683 return -EBADF;
2b188cc1 1684
267bc904 1685 ret = io_import_iovec(req->ctx, WRITE, &req->submit, &iovec, &iter);
87e5e6da 1686 if (ret < 0)
09bb8394 1687 return ret;
2b188cc1 1688
9e645e11
JA
1689 if (req->flags & REQ_F_LINK)
1690 req->result = ret;
1691
31b51510
JA
1692 iov_count = iov_iter_count(&iter);
1693
1694 ret = -EAGAIN;
561fb04a 1695 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT))
31b51510 1696 goto out_free;
31b51510
JA
1697
1698 ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
2b188cc1 1699 if (!ret) {
9bf7933f
RP
1700 ssize_t ret2;
1701
2b188cc1
JA
1702 /*
1703 * Open-code file_start_write here to grab freeze protection,
1704 * which will be released by another thread in
1705 * io_complete_rw(). Fool lockdep by telling it the lock got
1706 * released so that it doesn't complain about the held lock when
1707 * we return to userspace.
1708 */
491381ce 1709 if (req->flags & REQ_F_ISREG) {
2b188cc1
JA
1710 __sb_start_write(file_inode(file)->i_sb,
1711 SB_FREEZE_WRITE, true);
1712 __sb_writers_release(file_inode(file)->i_sb,
1713 SB_FREEZE_WRITE);
1714 }
1715 kiocb->ki_flags |= IOCB_WRITE;
9bf7933f 1716
32960613
JA
1717 if (file->f_op->write_iter)
1718 ret2 = call_write_iter(file, kiocb, &iter);
1719 else
1720 ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
561fb04a 1721 if (!force_nonblock || ret2 != -EAGAIN)
267bc904 1722 kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
561fb04a 1723 else
9bf7933f 1724 ret = -EAGAIN;
2b188cc1 1725 }
31b51510 1726out_free:
2b188cc1 1727 kfree(iovec);
2b188cc1
JA
1728 return ret;
1729}
1730
1731/*
1732 * IORING_OP_NOP just posts a completion event, nothing else.
1733 */
78e19bbe 1734static int io_nop(struct io_kiocb *req)
2b188cc1
JA
1735{
1736 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 1737
def596e9
JA
1738 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1739 return -EINVAL;
1740
78e19bbe 1741 io_cqring_add_event(req, 0);
ec9c02ad 1742 io_put_req(req);
2b188cc1
JA
1743 return 0;
1744}
1745
c992fe29
CH
1746static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1747{
6b06314c 1748 struct io_ring_ctx *ctx = req->ctx;
c992fe29 1749
09bb8394
JA
1750 if (!req->file)
1751 return -EBADF;
c992fe29 1752
6b06314c 1753 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
def596e9 1754 return -EINVAL;
edafccee 1755 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
c992fe29
CH
1756 return -EINVAL;
1757
c992fe29
CH
1758 return 0;
1759}
1760
1761static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
ba816ad6 1762 struct io_kiocb **nxt, bool force_nonblock)
c992fe29
CH
1763{
1764 loff_t sqe_off = READ_ONCE(sqe->off);
1765 loff_t sqe_len = READ_ONCE(sqe->len);
1766 loff_t end = sqe_off + sqe_len;
1767 unsigned fsync_flags;
1768 int ret;
1769
1770 fsync_flags = READ_ONCE(sqe->fsync_flags);
1771 if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
1772 return -EINVAL;
1773
1774 ret = io_prep_fsync(req, sqe);
1775 if (ret)
1776 return ret;
1777
1778 /* fsync always requires a blocking context */
1779 if (force_nonblock)
1780 return -EAGAIN;
1781
1782 ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
1783 end > 0 ? end : LLONG_MAX,
1784 fsync_flags & IORING_FSYNC_DATASYNC);
1785
9e645e11
JA
1786 if (ret < 0 && (req->flags & REQ_F_LINK))
1787 req->flags |= REQ_F_FAIL_LINK;
78e19bbe 1788 io_cqring_add_event(req, ret);
ec9c02ad 1789 io_put_req_find_next(req, nxt);
c992fe29
CH
1790 return 0;
1791}
1792
5d17b4a4
JA
1793static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1794{
1795 struct io_ring_ctx *ctx = req->ctx;
1796 int ret = 0;
1797
1798 if (!req->file)
1799 return -EBADF;
5d17b4a4
JA
1800
1801 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1802 return -EINVAL;
1803 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
1804 return -EINVAL;
1805
5d17b4a4
JA
1806 return ret;
1807}
1808
1809static int io_sync_file_range(struct io_kiocb *req,
1810 const struct io_uring_sqe *sqe,
ba816ad6 1811 struct io_kiocb **nxt,
5d17b4a4
JA
1812 bool force_nonblock)
1813{
1814 loff_t sqe_off;
1815 loff_t sqe_len;
1816 unsigned flags;
1817 int ret;
1818
1819 ret = io_prep_sfr(req, sqe);
1820 if (ret)
1821 return ret;
1822
1823 /* sync_file_range always requires a blocking context */
1824 if (force_nonblock)
1825 return -EAGAIN;
1826
1827 sqe_off = READ_ONCE(sqe->off);
1828 sqe_len = READ_ONCE(sqe->len);
1829 flags = READ_ONCE(sqe->sync_range_flags);
1830
1831 ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);
1832
9e645e11
JA
1833 if (ret < 0 && (req->flags & REQ_F_LINK))
1834 req->flags |= REQ_F_FAIL_LINK;
78e19bbe 1835 io_cqring_add_event(req, ret);
ec9c02ad 1836 io_put_req_find_next(req, nxt);
5d17b4a4
JA
1837 return 0;
1838}
1839
0fa03c62 1840#if defined(CONFIG_NET)
aa1fa28f 1841static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
ba816ad6 1842 struct io_kiocb **nxt, bool force_nonblock,
aa1fa28f
JA
1843 long (*fn)(struct socket *, struct user_msghdr __user *,
1844 unsigned int))
1845{
0fa03c62
JA
1846 struct socket *sock;
1847 int ret;
1848
1849 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1850 return -EINVAL;
1851
1852 sock = sock_from_file(req->file, &ret);
1853 if (sock) {
1854 struct user_msghdr __user *msg;
1855 unsigned flags;
1856
1857 flags = READ_ONCE(sqe->msg_flags);
1858 if (flags & MSG_DONTWAIT)
1859 req->flags |= REQ_F_NOWAIT;
1860 else if (force_nonblock)
1861 flags |= MSG_DONTWAIT;
1862
1863 msg = (struct user_msghdr __user *) (unsigned long)
1864 READ_ONCE(sqe->addr);
1865
aa1fa28f 1866 ret = fn(sock, msg, flags);
0fa03c62
JA
1867 if (force_nonblock && ret == -EAGAIN)
1868 return ret;
1869 }
1870
78e19bbe 1871 io_cqring_add_event(req, ret);
f1f40853
JA
1872 if (ret < 0 && (req->flags & REQ_F_LINK))
1873 req->flags |= REQ_F_FAIL_LINK;
ec9c02ad 1874 io_put_req_find_next(req, nxt);
5d17b4a4
JA
1875 return 0;
1876}
aa1fa28f
JA
1877#endif
1878
1879static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
ba816ad6 1880 struct io_kiocb **nxt, bool force_nonblock)
aa1fa28f
JA
1881{
1882#if defined(CONFIG_NET)
ba816ad6
JA
1883 return io_send_recvmsg(req, sqe, nxt, force_nonblock,
1884 __sys_sendmsg_sock);
aa1fa28f
JA
1885#else
1886 return -EOPNOTSUPP;
1887#endif
1888}
1889
1890static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
ba816ad6 1891 struct io_kiocb **nxt, bool force_nonblock)
aa1fa28f
JA
1892{
1893#if defined(CONFIG_NET)
ba816ad6
JA
1894 return io_send_recvmsg(req, sqe, nxt, force_nonblock,
1895 __sys_recvmsg_sock);
0fa03c62
JA
1896#else
1897 return -EOPNOTSUPP;
1898#endif
1899}
5d17b4a4 1900
17f2fe35
JA
1901static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1902 struct io_kiocb **nxt, bool force_nonblock)
1903{
1904#if defined(CONFIG_NET)
1905 struct sockaddr __user *addr;
1906 int __user *addr_len;
1907 unsigned file_flags;
1908 int flags, ret;
1909
1910 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
1911 return -EINVAL;
1912 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1913 return -EINVAL;
1914
1915 addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
1916 addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2);
1917 flags = READ_ONCE(sqe->accept_flags);
1918 file_flags = force_nonblock ? O_NONBLOCK : 0;
1919
1920 ret = __sys_accept4_file(req->file, file_flags, addr, addr_len, flags);
1921 if (ret == -EAGAIN && force_nonblock) {
1922 req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
1923 return -EAGAIN;
1924 }
8e3cca12
JA
1925 if (ret == -ERESTARTSYS)
1926 ret = -EINTR;
17f2fe35
JA
1927 if (ret < 0 && (req->flags & REQ_F_LINK))
1928 req->flags |= REQ_F_FAIL_LINK;
78e19bbe 1929 io_cqring_add_event(req, ret);
ec9c02ad 1930 io_put_req_find_next(req, nxt);
17f2fe35
JA
1931 return 0;
1932#else
1933 return -EOPNOTSUPP;
1934#endif
1935}
1936
221c5eb2
JA
1937static void io_poll_remove_one(struct io_kiocb *req)
1938{
1939 struct io_poll_iocb *poll = &req->poll;
1940
1941 spin_lock(&poll->head->lock);
1942 WRITE_ONCE(poll->canceled, true);
1943 if (!list_empty(&poll->wait.entry)) {
1944 list_del_init(&poll->wait.entry);
a197f664 1945 io_queue_async_work(req);
221c5eb2
JA
1946 }
1947 spin_unlock(&poll->head->lock);
1948
1949 list_del_init(&req->list);
1950}
1951
1952static void io_poll_remove_all(struct io_ring_ctx *ctx)
1953{
1954 struct io_kiocb *req;
1955
1956 spin_lock_irq(&ctx->completion_lock);
1957 while (!list_empty(&ctx->cancel_list)) {
1958 req = list_first_entry(&ctx->cancel_list, struct io_kiocb,list);
1959 io_poll_remove_one(req);
1960 }
1961 spin_unlock_irq(&ctx->completion_lock);
1962}
1963
47f46768
JA
1964static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
1965{
1966 struct io_kiocb *req;
1967
1968 list_for_each_entry(req, &ctx->cancel_list, list) {
1969 if (req->user_data != sqe_addr)
1970 continue;
1971 io_poll_remove_one(req);
1972 return 0;
1973 }
1974
1975 return -ENOENT;
1976}
1977
221c5eb2
JA
1978/*
1979 * Find a running poll command that matches one specified in sqe->addr,
1980 * and remove it if found.
1981 */
1982static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1983{
1984 struct io_ring_ctx *ctx = req->ctx;
47f46768 1985 int ret;
221c5eb2
JA
1986
1987 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1988 return -EINVAL;
1989 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
1990 sqe->poll_events)
1991 return -EINVAL;
1992
1993 spin_lock_irq(&ctx->completion_lock);
47f46768 1994 ret = io_poll_cancel(ctx, READ_ONCE(sqe->addr));
221c5eb2
JA
1995 spin_unlock_irq(&ctx->completion_lock);
1996
78e19bbe 1997 io_cqring_add_event(req, ret);
f1f40853
JA
1998 if (ret < 0 && (req->flags & REQ_F_LINK))
1999 req->flags |= REQ_F_FAIL_LINK;
ec9c02ad 2000 io_put_req(req);
221c5eb2
JA
2001 return 0;
2002}
2003
a197f664 2004static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
221c5eb2 2005{
a197f664
JL
2006 struct io_ring_ctx *ctx = req->ctx;
2007
8c838788 2008 req->poll.done = true;
78e19bbe 2009 io_cqring_fill_event(req, mangle_poll(mask));
8c838788 2010 io_commit_cqring(ctx);
221c5eb2
JA
2011}
2012
561fb04a 2013static void io_poll_complete_work(struct io_wq_work **workptr)
221c5eb2 2014{
561fb04a 2015 struct io_wq_work *work = *workptr;
221c5eb2
JA
2016 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2017 struct io_poll_iocb *poll = &req->poll;
2018 struct poll_table_struct pt = { ._key = poll->events };
2019 struct io_ring_ctx *ctx = req->ctx;
89723d0b 2020 struct io_kiocb *nxt = NULL;
221c5eb2
JA
2021 __poll_t mask = 0;
2022
561fb04a
JA
2023 if (work->flags & IO_WQ_WORK_CANCEL)
2024 WRITE_ONCE(poll->canceled, true);
2025
221c5eb2
JA
2026 if (!READ_ONCE(poll->canceled))
2027 mask = vfs_poll(poll->file, &pt) & poll->events;
2028
2029 /*
2030 * Note that ->ki_cancel callers also delete iocb from active_reqs after
2031 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
2032 * synchronize with them. In the cancellation case the list_del_init
2033 * itself is not actually needed, but harmless so we keep it in to
2034 * avoid further branches in the fast path.
2035 */
2036 spin_lock_irq(&ctx->completion_lock);
2037 if (!mask && !READ_ONCE(poll->canceled)) {
2038 add_wait_queue(poll->head, &poll->wait);
2039 spin_unlock_irq(&ctx->completion_lock);
2040 return;
2041 }
2042 list_del_init(&req->list);
a197f664 2043 io_poll_complete(req, mask);
221c5eb2
JA
2044 spin_unlock_irq(&ctx->completion_lock);
2045
8c838788 2046 io_cqring_ev_posted(ctx);
89723d0b 2047
ec9c02ad 2048 io_put_req_find_next(req, &nxt);
89723d0b
JA
2049 if (nxt)
2050 *workptr = &nxt->work;
221c5eb2
JA
2051}
2052
2053static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
2054 void *key)
2055{
2056 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
2057 wait);
2058 struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
2059 struct io_ring_ctx *ctx = req->ctx;
2060 __poll_t mask = key_to_poll(key);
8c838788 2061 unsigned long flags;
221c5eb2
JA
2062
2063 /* for instances that support it check for an event match first: */
8c838788
JA
2064 if (mask && !(mask & poll->events))
2065 return 0;
221c5eb2 2066
8c838788 2067 list_del_init(&poll->wait.entry);
221c5eb2 2068
7c9e7f0f
JA
2069 /*
2070 * Run completion inline if we can. We're using trylock here because
2071 * we are violating the completion_lock -> poll wq lock ordering.
2072 * If we have a link timeout we're going to need the completion_lock
2073 * for finalizing the request, mark us as having grabbed that already.
2074 */
8c838788
JA
2075 if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
2076 list_del(&req->list);
a197f664 2077 io_poll_complete(req, mask);
7c9e7f0f
JA
2078 req->flags |= REQ_F_COMP_LOCKED;
2079 io_put_req(req);
8c838788 2080 spin_unlock_irqrestore(&ctx->completion_lock, flags);
221c5eb2 2081
8c838788 2082 io_cqring_ev_posted(ctx);
8c838788 2083 } else {
a197f664 2084 io_queue_async_work(req);
221c5eb2
JA
2085 }
2086
221c5eb2
JA
2087 return 1;
2088}
2089
2090struct io_poll_table {
2091 struct poll_table_struct pt;
2092 struct io_kiocb *req;
2093 int error;
2094};
2095
2096static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
2097 struct poll_table_struct *p)
2098{
2099 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
2100
2101 if (unlikely(pt->req->poll.head)) {
2102 pt->error = -EINVAL;
2103 return;
2104 }
2105
2106 pt->error = 0;
2107 pt->req->poll.head = head;
2108 add_wait_queue(head, &pt->req->poll.wait);
2109}
2110
89723d0b
JA
2111static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2112 struct io_kiocb **nxt)
221c5eb2
JA
2113{
2114 struct io_poll_iocb *poll = &req->poll;
2115 struct io_ring_ctx *ctx = req->ctx;
2116 struct io_poll_table ipt;
8c838788 2117 bool cancel = false;
221c5eb2
JA
2118 __poll_t mask;
2119 u16 events;
221c5eb2
JA
2120
2121 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
2122 return -EINVAL;
2123 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
2124 return -EINVAL;
09bb8394
JA
2125 if (!poll->file)
2126 return -EBADF;
221c5eb2 2127
6cc47d1d 2128 req->submit.sqe = NULL;
561fb04a 2129 INIT_IO_WORK(&req->work, io_poll_complete_work);
221c5eb2
JA
2130 events = READ_ONCE(sqe->poll_events);
2131 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
2132
221c5eb2 2133 poll->head = NULL;
8c838788 2134 poll->done = false;
221c5eb2
JA
2135 poll->canceled = false;
2136
2137 ipt.pt._qproc = io_poll_queue_proc;
2138 ipt.pt._key = poll->events;
2139 ipt.req = req;
2140 ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
2141
2142 /* initialized the list so that we can do list_empty checks */
2143 INIT_LIST_HEAD(&poll->wait.entry);
2144 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
2145
36703247
JA
2146 INIT_LIST_HEAD(&req->list);
2147
221c5eb2 2148 mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
221c5eb2
JA
2149
2150 spin_lock_irq(&ctx->completion_lock);
8c838788
JA
2151 if (likely(poll->head)) {
2152 spin_lock(&poll->head->lock);
2153 if (unlikely(list_empty(&poll->wait.entry))) {
2154 if (ipt.error)
2155 cancel = true;
2156 ipt.error = 0;
2157 mask = 0;
2158 }
2159 if (mask || ipt.error)
2160 list_del_init(&poll->wait.entry);
2161 else if (cancel)
2162 WRITE_ONCE(poll->canceled, true);
2163 else if (!poll->done) /* actually waiting for an event */
2164 list_add_tail(&req->list, &ctx->cancel_list);
2165 spin_unlock(&poll->head->lock);
2166 }
2167 if (mask) { /* no async, we'd stolen it */
221c5eb2 2168 ipt.error = 0;
a197f664 2169 io_poll_complete(req, mask);
221c5eb2 2170 }
221c5eb2
JA
2171 spin_unlock_irq(&ctx->completion_lock);
2172
8c838788
JA
2173 if (mask) {
2174 io_cqring_ev_posted(ctx);
ec9c02ad 2175 io_put_req_find_next(req, nxt);
221c5eb2 2176 }
8c838788 2177 return ipt.error;
221c5eb2
JA
2178}
2179
5262f567
JA
2180static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
2181{
2182 struct io_ring_ctx *ctx;
11365043 2183 struct io_kiocb *req;
5262f567
JA
2184 unsigned long flags;
2185
2186 req = container_of(timer, struct io_kiocb, timeout.timer);
2187 ctx = req->ctx;
2188 atomic_inc(&ctx->cq_timeouts);
2189
2190 spin_lock_irqsave(&ctx->completion_lock, flags);
ef03681a 2191 /*
11365043
JA
2192 * We could be racing with timeout deletion. If the list is empty,
2193 * then timeout lookup already found it and will be handling it.
ef03681a 2194 */
842f9612 2195 if (!list_empty(&req->list)) {
11365043 2196 struct io_kiocb *prev;
5262f567 2197
11365043
JA
2198 /*
2199 * Adjust the reqs sequence before the current one because it
2200 * will consume a slot in the cq_ring and the the cq_tail
2201 * pointer will be increased, otherwise other timeout reqs may
2202 * return in advance without waiting for enough wait_nr.
2203 */
2204 prev = req;
2205 list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
2206 prev->sequence++;
11365043 2207 list_del_init(&req->list);
11365043 2208 }
842f9612 2209
78e19bbe 2210 io_cqring_fill_event(req, -ETIME);
842f9612 2211 io_commit_cqring(ctx);
5262f567
JA
2212 spin_unlock_irqrestore(&ctx->completion_lock, flags);
2213
842f9612 2214 io_cqring_ev_posted(ctx);
f1f40853
JA
2215 if (req->flags & REQ_F_LINK)
2216 req->flags |= REQ_F_FAIL_LINK;
ec9c02ad 2217 io_put_req(req);
11365043
JA
2218 return HRTIMER_NORESTART;
2219}
2220
47f46768
JA
2221static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
2222{
2223 struct io_kiocb *req;
2224 int ret = -ENOENT;
2225
2226 list_for_each_entry(req, &ctx->timeout_list, list) {
2227 if (user_data == req->user_data) {
2228 list_del_init(&req->list);
2229 ret = 0;
2230 break;
2231 }
2232 }
2233
2234 if (ret == -ENOENT)
2235 return ret;
2236
2237 ret = hrtimer_try_to_cancel(&req->timeout.timer);
2238 if (ret == -1)
2239 return -EALREADY;
2240
2241 io_cqring_fill_event(req, -ECANCELED);
2242 io_put_req(req);
2243 return 0;
2244}
2245
11365043
JA
2246/*
2247 * Remove or update an existing timeout command
2248 */
2249static int io_timeout_remove(struct io_kiocb *req,
2250 const struct io_uring_sqe *sqe)
2251{
2252 struct io_ring_ctx *ctx = req->ctx;
11365043 2253 unsigned flags;
47f46768 2254 int ret;
11365043
JA
2255
2256 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2257 return -EINVAL;
2258 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
2259 return -EINVAL;
2260 flags = READ_ONCE(sqe->timeout_flags);
2261 if (flags)
2262 return -EINVAL;
2263
11365043 2264 spin_lock_irq(&ctx->completion_lock);
47f46768 2265 ret = io_timeout_cancel(ctx, READ_ONCE(sqe->addr));
11365043 2266
47f46768 2267 io_cqring_fill_event(req, ret);
11365043
JA
2268 io_commit_cqring(ctx);
2269 spin_unlock_irq(&ctx->completion_lock);
5262f567 2270 io_cqring_ev_posted(ctx);
47f46768
JA
2271 if (ret < 0 && req->flags & REQ_F_LINK)
2272 req->flags |= REQ_F_FAIL_LINK;
ec9c02ad 2273 io_put_req(req);
11365043 2274 return 0;
5262f567
JA
2275}
2276
2277static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2278{
5da0fb1a 2279 unsigned count;
5262f567
JA
2280 struct io_ring_ctx *ctx = req->ctx;
2281 struct list_head *entry;
a41525ab 2282 enum hrtimer_mode mode;
bdf20073 2283 struct timespec64 ts;
a1f58ba4 2284 unsigned span = 0;
a41525ab 2285 unsigned flags;
5262f567
JA
2286
2287 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2288 return -EINVAL;
a41525ab
JA
2289 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len != 1)
2290 return -EINVAL;
2291 flags = READ_ONCE(sqe->timeout_flags);
2292 if (flags & ~IORING_TIMEOUT_ABS)
5262f567 2293 return -EINVAL;
bdf20073
AB
2294
2295 if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
2296 return -EFAULT;
2297
11365043
JA
2298 if (flags & IORING_TIMEOUT_ABS)
2299 mode = HRTIMER_MODE_ABS;
2300 else
2301 mode = HRTIMER_MODE_REL;
2302
2303 hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, mode);
2304
5262f567
JA
2305 /*
2306 * sqe->off holds how many events that need to occur for this
2307 * timeout event to be satisfied.
2308 */
2309 count = READ_ONCE(sqe->off);
2310 if (!count)
2311 count = 1;
2312
2313 req->sequence = ctx->cached_sq_head + count - 1;
5da0fb1a 2314 /* reuse it to store the count */
2315 req->submit.sequence = count;
5262f567
JA
2316 req->flags |= REQ_F_TIMEOUT;
2317
2318 /*
2319 * Insertion sort, ensuring the first entry in the list is always
2320 * the one we need first.
2321 */
5262f567
JA
2322 spin_lock_irq(&ctx->completion_lock);
2323 list_for_each_prev(entry, &ctx->timeout_list) {
2324 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
5da0fb1a 2325 unsigned nxt_sq_head;
2326 long long tmp, tmp_nxt;
5262f567 2327
5da0fb1a 2328 /*
2329 * Since cached_sq_head + count - 1 can overflow, use type long
2330 * long to store it.
2331 */
2332 tmp = (long long)ctx->cached_sq_head + count - 1;
2333 nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
2334 tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
2335
2336 /*
2337 * cached_sq_head may overflow, and it will never overflow twice
2338 * once there is some timeout req still be valid.
2339 */
2340 if (ctx->cached_sq_head < nxt_sq_head)
8b07a65a 2341 tmp += UINT_MAX;
5da0fb1a 2342
a1f58ba4 2343 if (tmp > tmp_nxt)
5262f567 2344 break;
a1f58ba4 2345
2346 /*
2347 * Sequence of reqs after the insert one and itself should
2348 * be adjusted because each timeout req consumes a slot.
2349 */
2350 span++;
2351 nxt->sequence++;
5262f567 2352 }
a1f58ba4 2353 req->sequence -= span;
5262f567 2354 list_add(&req->list, entry);
5262f567 2355 req->timeout.timer.function = io_timeout_fn;
a41525ab 2356 hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts), mode);
842f9612 2357 spin_unlock_irq(&ctx->completion_lock);
5262f567
JA
2358 return 0;
2359}
2360
62755e35
JA
2361static bool io_cancel_cb(struct io_wq_work *work, void *data)
2362{
2363 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2364
2365 return req->user_data == (unsigned long) data;
2366}
2367
e977d6d3 2368static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
62755e35 2369{
62755e35 2370 enum io_wq_cancel cancel_ret;
62755e35
JA
2371 int ret = 0;
2372
62755e35
JA
2373 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
2374 switch (cancel_ret) {
2375 case IO_WQ_CANCEL_OK:
2376 ret = 0;
2377 break;
2378 case IO_WQ_CANCEL_RUNNING:
2379 ret = -EALREADY;
2380 break;
2381 case IO_WQ_CANCEL_NOTFOUND:
2382 ret = -ENOENT;
2383 break;
2384 }
2385
e977d6d3
JA
2386 return ret;
2387}
2388
47f46768
JA
2389static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
2390 struct io_kiocb *req, __u64 sqe_addr,
2391 struct io_kiocb **nxt)
2392{
2393 unsigned long flags;
2394 int ret;
2395
2396 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
2397 if (ret != -ENOENT) {
2398 spin_lock_irqsave(&ctx->completion_lock, flags);
2399 goto done;
2400 }
2401
2402 spin_lock_irqsave(&ctx->completion_lock, flags);
2403 ret = io_timeout_cancel(ctx, sqe_addr);
2404 if (ret != -ENOENT)
2405 goto done;
2406 ret = io_poll_cancel(ctx, sqe_addr);
2407done:
2408 io_cqring_fill_event(req, ret);
2409 io_commit_cqring(ctx);
2410 spin_unlock_irqrestore(&ctx->completion_lock, flags);
2411 io_cqring_ev_posted(ctx);
2412
2413 if (ret < 0 && (req->flags & REQ_F_LINK))
2414 req->flags |= REQ_F_FAIL_LINK;
2415 io_put_req_find_next(req, nxt);
2416}
2417
e977d6d3
JA
2418static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2419 struct io_kiocb **nxt)
2420{
2421 struct io_ring_ctx *ctx = req->ctx;
e977d6d3
JA
2422
2423 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2424 return -EINVAL;
2425 if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
2426 sqe->cancel_flags)
2427 return -EINVAL;
2428
47f46768 2429 io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), NULL);
62755e35
JA
2430 return 0;
2431}
2432
a197f664 2433static int io_req_defer(struct io_kiocb *req)
de0617e4 2434{
267bc904 2435 const struct io_uring_sqe *sqe = req->submit.sqe;
de0617e4 2436 struct io_uring_sqe *sqe_copy;
a197f664 2437 struct io_ring_ctx *ctx = req->ctx;
de0617e4 2438
a197f664 2439 if (!io_sequence_defer(req) && list_empty(&ctx->defer_list))
de0617e4
JA
2440 return 0;
2441
2442 sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
2443 if (!sqe_copy)
2444 return -EAGAIN;
2445
2446 spin_lock_irq(&ctx->completion_lock);
a197f664 2447 if (!io_sequence_defer(req) && list_empty(&ctx->defer_list)) {
de0617e4
JA
2448 spin_unlock_irq(&ctx->completion_lock);
2449 kfree(sqe_copy);
2450 return 0;
2451 }
2452
2453 memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
2454 req->submit.sqe = sqe_copy;
2455
c826bd7a 2456 trace_io_uring_defer(ctx, req, false);
de0617e4
JA
2457 list_add_tail(&req->list, &ctx->defer_list);
2458 spin_unlock_irq(&ctx->completion_lock);
2459 return -EIOCBQUEUED;
2460}
2461
a197f664
JL
2462static int __io_submit_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
2463 bool force_nonblock)
2b188cc1 2464{
e0c5c576 2465 int ret, opcode;
267bc904 2466 struct sqe_submit *s = &req->submit;
a197f664 2467 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 2468
2b188cc1
JA
2469 opcode = READ_ONCE(s->sqe->opcode);
2470 switch (opcode) {
2471 case IORING_OP_NOP:
78e19bbe 2472 ret = io_nop(req);
2b188cc1
JA
2473 break;
2474 case IORING_OP_READV:
edafccee
JA
2475 if (unlikely(s->sqe->buf_index))
2476 return -EINVAL;
267bc904 2477 ret = io_read(req, nxt, force_nonblock);
2b188cc1
JA
2478 break;
2479 case IORING_OP_WRITEV:
edafccee
JA
2480 if (unlikely(s->sqe->buf_index))
2481 return -EINVAL;
267bc904 2482 ret = io_write(req, nxt, force_nonblock);
edafccee
JA
2483 break;
2484 case IORING_OP_READ_FIXED:
267bc904 2485 ret = io_read(req, nxt, force_nonblock);
edafccee
JA
2486 break;
2487 case IORING_OP_WRITE_FIXED:
267bc904 2488 ret = io_write(req, nxt, force_nonblock);
2b188cc1 2489 break;
c992fe29 2490 case IORING_OP_FSYNC:
ba816ad6 2491 ret = io_fsync(req, s->sqe, nxt, force_nonblock);
c992fe29 2492 break;
221c5eb2 2493 case IORING_OP_POLL_ADD:
89723d0b 2494 ret = io_poll_add(req, s->sqe, nxt);
221c5eb2
JA
2495 break;
2496 case IORING_OP_POLL_REMOVE:
2497 ret = io_poll_remove(req, s->sqe);
2498 break;
5d17b4a4 2499 case IORING_OP_SYNC_FILE_RANGE:
ba816ad6 2500 ret = io_sync_file_range(req, s->sqe, nxt, force_nonblock);
5d17b4a4 2501 break;
0fa03c62 2502 case IORING_OP_SENDMSG:
ba816ad6 2503 ret = io_sendmsg(req, s->sqe, nxt, force_nonblock);
0fa03c62 2504 break;
aa1fa28f 2505 case IORING_OP_RECVMSG:
ba816ad6 2506 ret = io_recvmsg(req, s->sqe, nxt, force_nonblock);
aa1fa28f 2507 break;
5262f567
JA
2508 case IORING_OP_TIMEOUT:
2509 ret = io_timeout(req, s->sqe);
2510 break;
11365043
JA
2511 case IORING_OP_TIMEOUT_REMOVE:
2512 ret = io_timeout_remove(req, s->sqe);
2513 break;
17f2fe35
JA
2514 case IORING_OP_ACCEPT:
2515 ret = io_accept(req, s->sqe, nxt, force_nonblock);
2516 break;
62755e35
JA
2517 case IORING_OP_ASYNC_CANCEL:
2518 ret = io_async_cancel(req, s->sqe, nxt);
2519 break;
2b188cc1
JA
2520 default:
2521 ret = -EINVAL;
2522 break;
2523 }
2524
def596e9
JA
2525 if (ret)
2526 return ret;
2527
2528 if (ctx->flags & IORING_SETUP_IOPOLL) {
9e645e11 2529 if (req->result == -EAGAIN)
def596e9
JA
2530 return -EAGAIN;
2531
2532 /* workqueue context doesn't hold uring_lock, grab it now */
ba5290cc 2533 if (s->in_async)
def596e9
JA
2534 mutex_lock(&ctx->uring_lock);
2535 io_iopoll_req_issued(req);
ba5290cc 2536 if (s->in_async)
def596e9
JA
2537 mutex_unlock(&ctx->uring_lock);
2538 }
2539
2540 return 0;
2b188cc1
JA
2541}
2542
561fb04a 2543static void io_wq_submit_work(struct io_wq_work **workptr)
2b188cc1 2544{
561fb04a 2545 struct io_wq_work *work = *workptr;
2b188cc1 2546 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
561fb04a
JA
2547 struct sqe_submit *s = &req->submit;
2548 const struct io_uring_sqe *sqe = s->sqe;
2549 struct io_kiocb *nxt = NULL;
2550 int ret = 0;
2b188cc1 2551
561fb04a
JA
2552 /* Ensure we clear previously set non-block flag */
2553 req->rw.ki_flags &= ~IOCB_NOWAIT;
2b188cc1 2554
561fb04a
JA
2555 if (work->flags & IO_WQ_WORK_CANCEL)
2556 ret = -ECANCELED;
31b51510 2557
561fb04a
JA
2558 if (!ret) {
2559 s->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
2560 s->in_async = true;
2561 do {
a197f664 2562 ret = __io_submit_sqe(req, &nxt, false);
561fb04a
JA
2563 /*
2564 * We can get EAGAIN for polled IO even though we're
2565 * forcing a sync submission from here, since we can't
2566 * wait for request slots on the block side.
2567 */
2568 if (ret != -EAGAIN)
2569 break;
2570 cond_resched();
2571 } while (1);
2572 }
31b51510 2573
561fb04a 2574 /* drop submission reference */
ec9c02ad 2575 io_put_req(req);
817869d2 2576
561fb04a 2577 if (ret) {
f1f40853
JA
2578 if (req->flags & REQ_F_LINK)
2579 req->flags |= REQ_F_FAIL_LINK;
78e19bbe 2580 io_cqring_add_event(req, ret);
ec9c02ad 2581 io_put_req(req);
2b188cc1 2582 }
31b51510 2583
561fb04a
JA
2584 /* async context always use a copy of the sqe */
2585 kfree(sqe);
31b51510 2586
561fb04a
JA
2587 /* if a dependent link is ready, pass it back */
2588 if (!ret && nxt) {
2589 io_prep_async_work(nxt);
2590 *workptr = &nxt->work;
31b51510 2591 }
2b188cc1
JA
2592}
2593
09bb8394
JA
2594static bool io_op_needs_file(const struct io_uring_sqe *sqe)
2595{
2596 int op = READ_ONCE(sqe->opcode);
2597
2598 switch (op) {
2599 case IORING_OP_NOP:
2600 case IORING_OP_POLL_REMOVE:
2601 return false;
2602 default:
2603 return true;
2604 }
2605}
2606
65e19f54
JA
2607static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
2608 int index)
2609{
2610 struct fixed_file_table *table;
2611
2612 table = &ctx->file_table[index >> IORING_FILE_TABLE_SHIFT];
2613 return table->files[index & IORING_FILE_TABLE_MASK];
2614}
2615
a197f664 2616static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
09bb8394 2617{
267bc904 2618 struct sqe_submit *s = &req->submit;
a197f664 2619 struct io_ring_ctx *ctx = req->ctx;
09bb8394
JA
2620 unsigned flags;
2621 int fd;
2622
2623 flags = READ_ONCE(s->sqe->flags);
2624 fd = READ_ONCE(s->sqe->fd);
2625
4fe2c963 2626 if (flags & IOSQE_IO_DRAIN)
de0617e4 2627 req->flags |= REQ_F_IO_DRAIN;
4fe2c963
JL
2628 /*
2629 * All io need record the previous position, if LINK vs DARIN,
2630 * it can be used to mark the position of the first IO in the
2631 * link list.
2632 */
2633 req->sequence = s->sequence;
de0617e4 2634
60c112b0 2635 if (!io_op_needs_file(s->sqe))
09bb8394 2636 return 0;
09bb8394
JA
2637
2638 if (flags & IOSQE_FIXED_FILE) {
65e19f54 2639 if (unlikely(!ctx->file_table ||
09bb8394
JA
2640 (unsigned) fd >= ctx->nr_user_files))
2641 return -EBADF;
b7620121 2642 fd = array_index_nospec(fd, ctx->nr_user_files);
65e19f54
JA
2643 req->file = io_file_from_index(ctx, fd);
2644 if (!req->file)
08a45173 2645 return -EBADF;
09bb8394
JA
2646 req->flags |= REQ_F_FIXED_FILE;
2647 } else {
2648 if (s->needs_fixed_file)
2649 return -EBADF;
c826bd7a 2650 trace_io_uring_file_get(ctx, fd);
09bb8394
JA
2651 req->file = io_file_get(state, fd);
2652 if (unlikely(!req->file))
2653 return -EBADF;
2654 }
2655
2656 return 0;
2657}
2658
a197f664 2659static int io_grab_files(struct io_kiocb *req)
fcb323cc
JA
2660{
2661 int ret = -EBADF;
a197f664 2662 struct io_ring_ctx *ctx = req->ctx;
fcb323cc
JA
2663
2664 rcu_read_lock();
2665 spin_lock_irq(&ctx->inflight_lock);
2666 /*
2667 * We use the f_ops->flush() handler to ensure that we can flush
2668 * out work accessing these files if the fd is closed. Check if
2669 * the fd has changed since we started down this path, and disallow
2670 * this operation if it has.
2671 */
2672 if (fcheck(req->submit.ring_fd) == req->submit.ring_file) {
2673 list_add(&req->inflight_entry, &ctx->inflight_list);
2674 req->flags |= REQ_F_INFLIGHT;
2675 req->work.files = current->files;
2676 ret = 0;
2677 }
2678 spin_unlock_irq(&ctx->inflight_lock);
2679 rcu_read_unlock();
2680
2681 return ret;
2682}
2683
2665abfd
JA
2684static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2685{
2686 struct io_kiocb *req = container_of(timer, struct io_kiocb,
2687 timeout.timer);
2688 struct io_ring_ctx *ctx = req->ctx;
2689 struct io_kiocb *prev = NULL;
2690 unsigned long flags;
2665abfd
JA
2691
2692 spin_lock_irqsave(&ctx->completion_lock, flags);
2693
2694 /*
2695 * We don't expect the list to be empty, that will only happen if we
2696 * race with the completion of the linked work.
2697 */
2698 if (!list_empty(&req->list)) {
2699 prev = list_entry(req->list.prev, struct io_kiocb, link_list);
76a46e06
JA
2700 if (refcount_inc_not_zero(&prev->refs))
2701 list_del_init(&req->list);
2702 else
2703 prev = NULL;
2665abfd
JA
2704 }
2705
2706 spin_unlock_irqrestore(&ctx->completion_lock, flags);
2707
2708 if (prev) {
47f46768 2709 io_async_find_and_cancel(ctx, req, prev->user_data, NULL);
76a46e06 2710 io_put_req(prev);
47f46768
JA
2711 } else {
2712 io_cqring_add_event(req, -ETIME);
2713 io_put_req(req);
2665abfd 2714 }
2665abfd
JA
2715 return HRTIMER_NORESTART;
2716}
2717
76a46e06
JA
2718static void io_queue_linked_timeout(struct io_kiocb *req, struct timespec64 *ts,
2719 enum hrtimer_mode *mode)
2665abfd 2720{
76a46e06 2721 struct io_ring_ctx *ctx = req->ctx;
2665abfd 2722
76a46e06
JA
2723 /*
2724 * If the list is now empty, then our linked request finished before
2725 * we got a chance to setup the timer
2726 */
2727 spin_lock_irq(&ctx->completion_lock);
2728 if (!list_empty(&req->list)) {
2729 req->timeout.timer.function = io_link_timeout_fn;
2730 hrtimer_start(&req->timeout.timer, timespec64_to_ktime(*ts),
2731 *mode);
2665abfd 2732 }
76a46e06 2733 spin_unlock_irq(&ctx->completion_lock);
2665abfd 2734
2665abfd 2735 /* drop submission reference */
76a46e06
JA
2736 io_put_req(req);
2737}
2665abfd 2738
76a46e06
JA
2739static int io_validate_link_timeout(const struct io_uring_sqe *sqe,
2740 struct timespec64 *ts)
2741{
2742 if (sqe->ioprio || sqe->buf_index || sqe->len != 1 || sqe->off)
2743 return -EINVAL;
2744 if (sqe->timeout_flags & ~IORING_TIMEOUT_ABS)
2745 return -EINVAL;
2746 if (get_timespec64(ts, u64_to_user_ptr(sqe->addr)))
2747 return -EFAULT;
2665abfd 2748
76a46e06 2749 return 0;
2665abfd
JA
2750}
2751
76a46e06
JA
2752static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req,
2753 struct timespec64 *ts,
2754 enum hrtimer_mode *mode)
2665abfd
JA
2755{
2756 struct io_kiocb *nxt;
76a46e06 2757 int ret;
2665abfd
JA
2758
2759 if (!(req->flags & REQ_F_LINK))
2760 return NULL;
2761
2762 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
76a46e06
JA
2763 if (!nxt || nxt->submit.sqe->opcode != IORING_OP_LINK_TIMEOUT)
2764 return NULL;
2665abfd 2765
76a46e06
JA
2766 ret = io_validate_link_timeout(nxt->submit.sqe, ts);
2767 if (ret) {
2768 list_del_init(&nxt->list);
2769 io_cqring_add_event(nxt, ret);
2770 io_double_put_req(nxt);
2771 return ERR_PTR(-ECANCELED);
2772 }
2773
2774 if (nxt->submit.sqe->timeout_flags & IORING_TIMEOUT_ABS)
2775 *mode = HRTIMER_MODE_ABS;
2776 else
2777 *mode = HRTIMER_MODE_REL;
2778
2779 req->flags |= REQ_F_LINK_TIMEOUT;
2780 hrtimer_init(&nxt->timeout.timer, CLOCK_MONOTONIC, *mode);
2781 return nxt;
2665abfd
JA
2782}
2783
a197f664 2784static int __io_queue_sqe(struct io_kiocb *req)
2b188cc1 2785{
76a46e06 2786 enum hrtimer_mode mode;
2665abfd 2787 struct io_kiocb *nxt;
76a46e06 2788 struct timespec64 ts;
e0c5c576 2789 int ret;
2b188cc1 2790
76a46e06
JA
2791 nxt = io_prep_linked_timeout(req, &ts, &mode);
2792 if (IS_ERR(nxt)) {
2793 ret = PTR_ERR(nxt);
2794 nxt = NULL;
2795 goto err;
2665abfd
JA
2796 }
2797
a197f664 2798 ret = __io_submit_sqe(req, NULL, true);
491381ce
JA
2799
2800 /*
2801 * We async punt it if the file wasn't marked NOWAIT, or if the file
2802 * doesn't support non-blocking read/write attempts
2803 */
2804 if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
2805 (req->flags & REQ_F_MUST_PUNT))) {
267bc904 2806 struct sqe_submit *s = &req->submit;
2b188cc1
JA
2807 struct io_uring_sqe *sqe_copy;
2808
954dab19 2809 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2b188cc1 2810 if (sqe_copy) {
2b188cc1 2811 s->sqe = sqe_copy;
fcb323cc 2812 if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
a197f664 2813 ret = io_grab_files(req);
fcb323cc
JA
2814 if (ret) {
2815 kfree(sqe_copy);
2816 goto err;
2817 }
2818 }
e65ef56d
JA
2819
2820 /*
2821 * Queued up for async execution, worker will release
9e645e11 2822 * submit reference when the iocb is actually submitted.
e65ef56d 2823 */
a197f664 2824 io_queue_async_work(req);
76a46e06
JA
2825
2826 if (nxt)
2827 io_queue_linked_timeout(nxt, &ts, &mode);
2828
e65ef56d 2829 return 0;
2b188cc1
JA
2830 }
2831 }
e65ef56d 2832
fcb323cc 2833err:
76a46e06 2834 /* drop submission reference */
ec9c02ad 2835 io_put_req(req);
e65ef56d 2836
76a46e06
JA
2837 if (nxt) {
2838 if (!ret)
2839 io_queue_linked_timeout(nxt, &ts, &mode);
2840 else
2841 io_put_req(nxt);
2842 }
2843
e65ef56d 2844 /* and drop final reference, if we failed */
9e645e11 2845 if (ret) {
78e19bbe 2846 io_cqring_add_event(req, ret);
9e645e11
JA
2847 if (req->flags & REQ_F_LINK)
2848 req->flags |= REQ_F_FAIL_LINK;
ec9c02ad 2849 io_put_req(req);
9e645e11 2850 }
2b188cc1
JA
2851
2852 return ret;
2853}
2854
a197f664 2855static int io_queue_sqe(struct io_kiocb *req)
4fe2c963
JL
2856{
2857 int ret;
2858
a197f664 2859 ret = io_req_defer(req);
4fe2c963
JL
2860 if (ret) {
2861 if (ret != -EIOCBQUEUED) {
78e19bbe
JA
2862 io_cqring_add_event(req, ret);
2863 io_double_put_req(req);
4fe2c963
JL
2864 }
2865 return 0;
2866 }
2867
a197f664 2868 return __io_queue_sqe(req);
4fe2c963
JL
2869}
2870
a197f664 2871static int io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
4fe2c963
JL
2872{
2873 int ret;
2874 int need_submit = false;
a197f664 2875 struct io_ring_ctx *ctx = req->ctx;
4fe2c963
JL
2876
2877 if (!shadow)
a197f664 2878 return io_queue_sqe(req);
4fe2c963
JL
2879
2880 /*
2881 * Mark the first IO in link list as DRAIN, let all the following
2882 * IOs enter the defer list. all IO needs to be completed before link
2883 * list.
2884 */
2885 req->flags |= REQ_F_IO_DRAIN;
a197f664 2886 ret = io_req_defer(req);
4fe2c963
JL
2887 if (ret) {
2888 if (ret != -EIOCBQUEUED) {
78e19bbe
JA
2889 io_cqring_add_event(req, ret);
2890 io_double_put_req(req);
7b20238d 2891 __io_free_req(shadow);
4fe2c963
JL
2892 return 0;
2893 }
2894 } else {
2895 /*
2896 * If ret == 0 means that all IOs in front of link io are
2897 * running done. let's queue link head.
2898 */
2899 need_submit = true;
2900 }
2901
2902 /* Insert shadow req to defer_list, blocking next IOs */
2903 spin_lock_irq(&ctx->completion_lock);
c826bd7a 2904 trace_io_uring_defer(ctx, shadow, true);
4fe2c963
JL
2905 list_add_tail(&shadow->list, &ctx->defer_list);
2906 spin_unlock_irq(&ctx->completion_lock);
2907
2908 if (need_submit)
a197f664 2909 return __io_queue_sqe(req);
4fe2c963
JL
2910
2911 return 0;
2912}
2913
9e645e11
JA
2914#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
2915
a197f664
JL
2916static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
2917 struct io_kiocb **link)
9e645e11
JA
2918{
2919 struct io_uring_sqe *sqe_copy;
267bc904 2920 struct sqe_submit *s = &req->submit;
a197f664 2921 struct io_ring_ctx *ctx = req->ctx;
9e645e11
JA
2922 int ret;
2923
78e19bbe
JA
2924 req->user_data = s->sqe->user_data;
2925
9e645e11
JA
2926 /* enforce forwards compatibility on users */
2927 if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
2928 ret = -EINVAL;
196be95c 2929 goto err_req;
9e645e11
JA
2930 }
2931
a197f664 2932 ret = io_req_set_file(state, req);
9e645e11
JA
2933 if (unlikely(ret)) {
2934err_req:
78e19bbe
JA
2935 io_cqring_add_event(req, ret);
2936 io_double_put_req(req);
9e645e11
JA
2937 return;
2938 }
2939
9e645e11
JA
2940 /*
2941 * If we already have a head request, queue this one for async
2942 * submittal once the head completes. If we don't have a head but
2943 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2944 * submitted sync once the chain is complete. If none of those
2945 * conditions are true (normal request), then just queue it.
2946 */
2947 if (*link) {
2948 struct io_kiocb *prev = *link;
2949
2950 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2951 if (!sqe_copy) {
2952 ret = -EAGAIN;
2953 goto err_req;
2954 }
2955
2956 s->sqe = sqe_copy;
c826bd7a 2957 trace_io_uring_link(ctx, req, prev);
9e645e11
JA
2958 list_add_tail(&req->list, &prev->link_list);
2959 } else if (s->sqe->flags & IOSQE_IO_LINK) {
2960 req->flags |= REQ_F_LINK;
2961
9e645e11
JA
2962 INIT_LIST_HEAD(&req->link_list);
2963 *link = req;
2665abfd
JA
2964 } else if (READ_ONCE(s->sqe->opcode) == IORING_OP_LINK_TIMEOUT) {
2965 /* Only valid as a linked SQE */
2966 ret = -EINVAL;
2967 goto err_req;
9e645e11 2968 } else {
a197f664 2969 io_queue_sqe(req);
9e645e11
JA
2970 }
2971}
2972
9a56a232
JA
2973/*
2974 * Batched submission is done, ensure local IO is flushed out.
2975 */
2976static void io_submit_state_end(struct io_submit_state *state)
2977{
2978 blk_finish_plug(&state->plug);
3d6770fb 2979 io_file_put(state);
2579f913
JA
2980 if (state->free_reqs)
2981 kmem_cache_free_bulk(req_cachep, state->free_reqs,
2982 &state->reqs[state->cur_req]);
9a56a232
JA
2983}
2984
2985/*
2986 * Start submission side cache.
2987 */
2988static void io_submit_state_start(struct io_submit_state *state,
2989 struct io_ring_ctx *ctx, unsigned max_ios)
2990{
2991 blk_start_plug(&state->plug);
2579f913 2992 state->free_reqs = 0;
9a56a232
JA
2993 state->file = NULL;
2994 state->ios_left = max_ios;
2995}
2996
2b188cc1
JA
2997static void io_commit_sqring(struct io_ring_ctx *ctx)
2998{
75b28aff 2999 struct io_rings *rings = ctx->rings;
2b188cc1 3000
75b28aff 3001 if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
2b188cc1
JA
3002 /*
3003 * Ensure any loads from the SQEs are done at this point,
3004 * since once we write the new head, the application could
3005 * write new data to them.
3006 */
75b28aff 3007 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
3008 }
3009}
3010
2b188cc1
JA
3011/*
3012 * Fetch an sqe, if one is available. Note that s->sqe will point to memory
3013 * that is mapped by userspace. This means that care needs to be taken to
3014 * ensure that reads are stable, as we cannot rely on userspace always
3015 * being a good citizen. If members of the sqe are validated and then later
3016 * used, it's important that those reads are done through READ_ONCE() to
3017 * prevent a re-load down the line.
3018 */
3019static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
3020{
75b28aff
HV
3021 struct io_rings *rings = ctx->rings;
3022 u32 *sq_array = ctx->sq_array;
2b188cc1
JA
3023 unsigned head;
3024
3025 /*
3026 * The cached sq head (or cq tail) serves two purposes:
3027 *
3028 * 1) allows us to batch the cost of updating the user visible
3029 * head updates.
3030 * 2) allows the kernel side to track the head on its own, even
3031 * though the application is the one updating it.
3032 */
3033 head = ctx->cached_sq_head;
e523a29c 3034 /* make sure SQ entry isn't read before tail */
75b28aff 3035 if (head == smp_load_acquire(&rings->sq.tail))
2b188cc1
JA
3036 return false;
3037
75b28aff 3038 head = READ_ONCE(sq_array[head & ctx->sq_mask]);
2b188cc1 3039 if (head < ctx->sq_entries) {
fcb323cc 3040 s->ring_file = NULL;
2b188cc1 3041 s->sqe = &ctx->sq_sqes[head];
8776f3fa 3042 s->sequence = ctx->cached_sq_head;
2b188cc1
JA
3043 ctx->cached_sq_head++;
3044 return true;
3045 }
3046
3047 /* drop invalid entries */
3048 ctx->cached_sq_head++;
498ccd9e
JA
3049 ctx->cached_sq_dropped++;
3050 WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
2b188cc1
JA
3051 return false;
3052}
3053
fb5ccc98 3054static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
ae9428ca
PB
3055 struct file *ring_file, int ring_fd,
3056 struct mm_struct **mm, bool async)
6c271ce2
JA
3057{
3058 struct io_submit_state state, *statep = NULL;
9e645e11 3059 struct io_kiocb *link = NULL;
4fe2c963 3060 struct io_kiocb *shadow_req = NULL;
9e645e11 3061 int i, submitted = 0;
95a1b3ff 3062 bool mm_fault = false;
6c271ce2 3063
1d7bb1d5
JA
3064 if (!list_empty(&ctx->cq_overflow_list)) {
3065 io_cqring_overflow_flush(ctx, false);
3066 return -EBUSY;
3067 }
3068
6c271ce2
JA
3069 if (nr > IO_PLUG_THRESHOLD) {
3070 io_submit_state_start(&state, ctx, nr);
3071 statep = &state;
3072 }
3073
3074 for (i = 0; i < nr; i++) {
196be95c 3075 struct io_kiocb *req;
50585b9a 3076 unsigned int sqe_flags;
fb5ccc98 3077
196be95c
PB
3078 req = io_get_req(ctx, statep);
3079 if (unlikely(!req)) {
3080 if (!submitted)
3081 submitted = -EAGAIN;
fb5ccc98 3082 break;
196be95c 3083 }
50585b9a 3084 if (!io_get_sqring(ctx, &req->submit)) {
196be95c
PB
3085 __io_free_req(req);
3086 break;
3087 }
fb5ccc98 3088
50585b9a 3089 if (io_sqe_needs_user(req->submit.sqe) && !*mm) {
95a1b3ff
PB
3090 mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
3091 if (!mm_fault) {
3092 use_mm(ctx->sqo_mm);
3093 *mm = ctx->sqo_mm;
3094 }
3095 }
3096
50585b9a
PB
3097 sqe_flags = req->submit.sqe->flags;
3098
3099 if (link && (sqe_flags & IOSQE_IO_DRAIN)) {
4fe2c963
JL
3100 if (!shadow_req) {
3101 shadow_req = io_get_req(ctx, NULL);
a1041c27
JL
3102 if (unlikely(!shadow_req))
3103 goto out;
4fe2c963
JL
3104 shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
3105 refcount_dec(&shadow_req->refs);
3106 }
50585b9a 3107 shadow_req->sequence = req->submit.sequence;
4fe2c963
JL
3108 }
3109
a1041c27 3110out:
50585b9a
PB
3111 req->submit.ring_file = ring_file;
3112 req->submit.ring_fd = ring_fd;
3113 req->submit.has_user = *mm != NULL;
3114 req->submit.in_async = async;
3115 req->submit.needs_fixed_file = async;
3116 trace_io_uring_submit_sqe(ctx, req->submit.sqe->user_data,
3117 true, async);
a197f664 3118 io_submit_sqe(req, statep, &link);
95a1b3ff 3119 submitted++;
e5eb6366
PB
3120
3121 /*
3122 * If previous wasn't linked and we have a linked command,
3123 * that's the end of the chain. Submit the previous link.
3124 */
50585b9a 3125 if (!(sqe_flags & IOSQE_IO_LINK) && link) {
a197f664 3126 io_queue_link_head(link, shadow_req);
e5eb6366
PB
3127 link = NULL;
3128 shadow_req = NULL;
3129 }
6c271ce2
JA
3130 }
3131
9e645e11 3132 if (link)
a197f664 3133 io_queue_link_head(link, shadow_req);
6c271ce2
JA
3134 if (statep)
3135 io_submit_state_end(&state);
3136
ae9428ca
PB
3137 /* Commit SQ ring head once we've consumed and submitted all SQEs */
3138 io_commit_sqring(ctx);
3139
6c271ce2
JA
3140 return submitted;
3141}
3142
3143static int io_sq_thread(void *data)
3144{
6c271ce2
JA
3145 struct io_ring_ctx *ctx = data;
3146 struct mm_struct *cur_mm = NULL;
3147 mm_segment_t old_fs;
3148 DEFINE_WAIT(wait);
3149 unsigned inflight;
3150 unsigned long timeout;
c1edbf5f 3151 int ret;
6c271ce2 3152
206aefde 3153 complete(&ctx->completions[1]);
a4c0b3de 3154
6c271ce2
JA
3155 old_fs = get_fs();
3156 set_fs(USER_DS);
3157
c1edbf5f 3158 ret = timeout = inflight = 0;
2bbcd6d3 3159 while (!kthread_should_park()) {
fb5ccc98 3160 unsigned int to_submit;
6c271ce2
JA
3161
3162 if (inflight) {
3163 unsigned nr_events = 0;
3164
3165 if (ctx->flags & IORING_SETUP_IOPOLL) {
2b2ed975
JA
3166 /*
3167 * inflight is the count of the maximum possible
3168 * entries we submitted, but it can be smaller
3169 * if we dropped some of them. If we don't have
3170 * poll entries available, then we know that we
3171 * have nothing left to poll for. Reset the
3172 * inflight count to zero in that case.
3173 */
3174 mutex_lock(&ctx->uring_lock);
3175 if (!list_empty(&ctx->poll_list))
3176 __io_iopoll_check(ctx, &nr_events, 0);
3177 else
3178 inflight = 0;
3179 mutex_unlock(&ctx->uring_lock);
6c271ce2
JA
3180 } else {
3181 /*
3182 * Normal IO, just pretend everything completed.
3183 * We don't have to poll completions for that.
3184 */
3185 nr_events = inflight;
3186 }
3187
3188 inflight -= nr_events;
3189 if (!inflight)
3190 timeout = jiffies + ctx->sq_thread_idle;
3191 }
3192
fb5ccc98 3193 to_submit = io_sqring_entries(ctx);
c1edbf5f
JA
3194
3195 /*
3196 * If submit got -EBUSY, flag us as needing the application
3197 * to enter the kernel to reap and flush events.
3198 */
3199 if (!to_submit || ret == -EBUSY) {
6c271ce2
JA
3200 /*
3201 * We're polling. If we're within the defined idle
3202 * period, then let us spin without work before going
c1edbf5f
JA
3203 * to sleep. The exception is if we got EBUSY doing
3204 * more IO, we should wait for the application to
3205 * reap events and wake us up.
6c271ce2 3206 */
c1edbf5f
JA
3207 if (inflight ||
3208 (!time_after(jiffies, timeout) && ret != -EBUSY)) {
9831a90c 3209 cond_resched();
6c271ce2
JA
3210 continue;
3211 }
3212
3213 /*
3214 * Drop cur_mm before scheduling, we can't hold it for
3215 * long periods (or over schedule()). Do this before
3216 * adding ourselves to the waitqueue, as the unuse/drop
3217 * may sleep.
3218 */
3219 if (cur_mm) {
3220 unuse_mm(cur_mm);
3221 mmput(cur_mm);
3222 cur_mm = NULL;
3223 }
3224
3225 prepare_to_wait(&ctx->sqo_wait, &wait,
3226 TASK_INTERRUPTIBLE);
3227
3228 /* Tell userspace we may need a wakeup call */
75b28aff 3229 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
0d7bae69
SB
3230 /* make sure to read SQ tail after writing flags */
3231 smp_mb();
6c271ce2 3232
fb5ccc98 3233 to_submit = io_sqring_entries(ctx);
c1edbf5f 3234 if (!to_submit || ret == -EBUSY) {
2bbcd6d3 3235 if (kthread_should_park()) {
6c271ce2
JA
3236 finish_wait(&ctx->sqo_wait, &wait);
3237 break;
3238 }
3239 if (signal_pending(current))
3240 flush_signals(current);
3241 schedule();
3242 finish_wait(&ctx->sqo_wait, &wait);
3243
75b28aff 3244 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6c271ce2
JA
3245 continue;
3246 }
3247 finish_wait(&ctx->sqo_wait, &wait);
3248
75b28aff 3249 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6c271ce2
JA
3250 }
3251
fb5ccc98 3252 to_submit = min(to_submit, ctx->sq_entries);
1d7bb1d5
JA
3253 ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
3254 if (ret > 0)
3255 inflight += ret;
6c271ce2
JA
3256 }
3257
3258 set_fs(old_fs);
3259 if (cur_mm) {
3260 unuse_mm(cur_mm);
3261 mmput(cur_mm);
3262 }
06058632 3263
2bbcd6d3 3264 kthread_parkme();
06058632 3265
6c271ce2
JA
3266 return 0;
3267}
3268
bda52162
JA
3269struct io_wait_queue {
3270 struct wait_queue_entry wq;
3271 struct io_ring_ctx *ctx;
3272 unsigned to_wait;
3273 unsigned nr_timeouts;
3274};
3275
1d7bb1d5 3276static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
bda52162
JA
3277{
3278 struct io_ring_ctx *ctx = iowq->ctx;
3279
3280 /*
3281 * Wake up if we have enough events, or if a timeout occured since we
3282 * started waiting. For timeouts, we always want to return to userspace,
3283 * regardless of event count.
3284 */
1d7bb1d5 3285 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
bda52162
JA
3286 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
3287}
3288
3289static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
3290 int wake_flags, void *key)
3291{
3292 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
3293 wq);
3294
1d7bb1d5
JA
3295 /* use noflush == true, as we can't safely rely on locking context */
3296 if (!io_should_wake(iowq, true))
bda52162
JA
3297 return -1;
3298
3299 return autoremove_wake_function(curr, mode, wake_flags, key);
3300}
3301
2b188cc1
JA
3302/*
3303 * Wait until events become available, if we don't already have some. The
3304 * application must reap them itself, as they reside on the shared cq ring.
3305 */
3306static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
3307 const sigset_t __user *sig, size_t sigsz)
3308{
bda52162
JA
3309 struct io_wait_queue iowq = {
3310 .wq = {
3311 .private = current,
3312 .func = io_wake_function,
3313 .entry = LIST_HEAD_INIT(iowq.wq.entry),
3314 },
3315 .ctx = ctx,
3316 .to_wait = min_events,
3317 };
75b28aff 3318 struct io_rings *rings = ctx->rings;
e9ffa5c2 3319 int ret = 0;
2b188cc1 3320
1d7bb1d5 3321 if (io_cqring_events(ctx, false) >= min_events)
2b188cc1
JA
3322 return 0;
3323
3324 if (sig) {
9e75ad5d
AB
3325#ifdef CONFIG_COMPAT
3326 if (in_compat_syscall())
3327 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 3328 sigsz);
9e75ad5d
AB
3329 else
3330#endif
b772434b 3331 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 3332
2b188cc1
JA
3333 if (ret)
3334 return ret;
3335 }
3336
bda52162 3337 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
c826bd7a 3338 trace_io_uring_cqring_wait(ctx, min_events);
bda52162
JA
3339 do {
3340 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
3341 TASK_INTERRUPTIBLE);
1d7bb1d5 3342 if (io_should_wake(&iowq, false))
bda52162
JA
3343 break;
3344 schedule();
3345 if (signal_pending(current)) {
e9ffa5c2 3346 ret = -EINTR;
bda52162
JA
3347 break;
3348 }
3349 } while (1);
3350 finish_wait(&ctx->wait, &iowq.wq);
3351
e9ffa5c2 3352 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 3353
75b28aff 3354 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
3355}
3356
6b06314c
JA
3357static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
3358{
3359#if defined(CONFIG_UNIX)
3360 if (ctx->ring_sock) {
3361 struct sock *sock = ctx->ring_sock->sk;
3362 struct sk_buff *skb;
3363
3364 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
3365 kfree_skb(skb);
3366 }
3367#else
3368 int i;
3369
65e19f54
JA
3370 for (i = 0; i < ctx->nr_user_files; i++) {
3371 struct file *file;
3372
3373 file = io_file_from_index(ctx, i);
3374 if (file)
3375 fput(file);
3376 }
6b06314c
JA
3377#endif
3378}
3379
3380static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
3381{
65e19f54
JA
3382 unsigned nr_tables, i;
3383
3384 if (!ctx->file_table)
6b06314c
JA
3385 return -ENXIO;
3386
3387 __io_sqe_files_unregister(ctx);
65e19f54
JA
3388 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
3389 for (i = 0; i < nr_tables; i++)
3390 kfree(ctx->file_table[i].files);
3391 kfree(ctx->file_table);
3392 ctx->file_table = NULL;
6b06314c
JA
3393 ctx->nr_user_files = 0;
3394 return 0;
3395}
3396
6c271ce2
JA
3397static void io_sq_thread_stop(struct io_ring_ctx *ctx)
3398{
3399 if (ctx->sqo_thread) {
206aefde 3400 wait_for_completion(&ctx->completions[1]);
2bbcd6d3
RP
3401 /*
3402 * The park is a bit of a work-around, without it we get
3403 * warning spews on shutdown with SQPOLL set and affinity
3404 * set to a single CPU.
3405 */
06058632 3406 kthread_park(ctx->sqo_thread);
6c271ce2
JA
3407 kthread_stop(ctx->sqo_thread);
3408 ctx->sqo_thread = NULL;
3409 }
3410}
3411
6b06314c
JA
3412static void io_finish_async(struct io_ring_ctx *ctx)
3413{
6c271ce2
JA
3414 io_sq_thread_stop(ctx);
3415
561fb04a
JA
3416 if (ctx->io_wq) {
3417 io_wq_destroy(ctx->io_wq);
3418 ctx->io_wq = NULL;
6b06314c
JA
3419 }
3420}
3421
3422#if defined(CONFIG_UNIX)
3423static void io_destruct_skb(struct sk_buff *skb)
3424{
3425 struct io_ring_ctx *ctx = skb->sk->sk_user_data;
8a997340 3426
561fb04a
JA
3427 if (ctx->io_wq)
3428 io_wq_flush(ctx->io_wq);
6b06314c 3429
6b06314c
JA
3430 unix_destruct_scm(skb);
3431}
3432
3433/*
3434 * Ensure the UNIX gc is aware of our file set, so we are certain that
3435 * the io_uring can be safely unregistered on process exit, even if we have
3436 * loops in the file referencing.
3437 */
3438static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
3439{
3440 struct sock *sk = ctx->ring_sock->sk;
3441 struct scm_fp_list *fpl;
3442 struct sk_buff *skb;
08a45173 3443 int i, nr_files;
6b06314c
JA
3444
3445 if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
3446 unsigned long inflight = ctx->user->unix_inflight + nr;
3447
3448 if (inflight > task_rlimit(current, RLIMIT_NOFILE))
3449 return -EMFILE;
3450 }
3451
3452 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
3453 if (!fpl)
3454 return -ENOMEM;
3455
3456 skb = alloc_skb(0, GFP_KERNEL);
3457 if (!skb) {
3458 kfree(fpl);
3459 return -ENOMEM;
3460 }
3461
3462 skb->sk = sk;
6b06314c 3463
08a45173 3464 nr_files = 0;
6b06314c
JA
3465 fpl->user = get_uid(ctx->user);
3466 for (i = 0; i < nr; i++) {
65e19f54
JA
3467 struct file *file = io_file_from_index(ctx, i + offset);
3468
3469 if (!file)
08a45173 3470 continue;
65e19f54 3471 fpl->fp[nr_files] = get_file(file);
08a45173
JA
3472 unix_inflight(fpl->user, fpl->fp[nr_files]);
3473 nr_files++;
6b06314c
JA
3474 }
3475
08a45173
JA
3476 if (nr_files) {
3477 fpl->max = SCM_MAX_FD;
3478 fpl->count = nr_files;
3479 UNIXCB(skb).fp = fpl;
3480 skb->destructor = io_destruct_skb;
3481 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
3482 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 3483
08a45173
JA
3484 for (i = 0; i < nr_files; i++)
3485 fput(fpl->fp[i]);
3486 } else {
3487 kfree_skb(skb);
3488 kfree(fpl);
3489 }
6b06314c
JA
3490
3491 return 0;
3492}
3493
3494/*
3495 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
3496 * causes regular reference counting to break down. We rely on the UNIX
3497 * garbage collection to take care of this problem for us.
3498 */
3499static int io_sqe_files_scm(struct io_ring_ctx *ctx)
3500{
3501 unsigned left, total;
3502 int ret = 0;
3503
3504 total = 0;
3505 left = ctx->nr_user_files;
3506 while (left) {
3507 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
3508
3509 ret = __io_sqe_files_scm(ctx, this_files, total);
3510 if (ret)
3511 break;
3512 left -= this_files;
3513 total += this_files;
3514 }
3515
3516 if (!ret)
3517 return 0;
3518
3519 while (total < ctx->nr_user_files) {
65e19f54
JA
3520 struct file *file = io_file_from_index(ctx, total);
3521
3522 if (file)
3523 fput(file);
6b06314c
JA
3524 total++;
3525 }
3526
3527 return ret;
3528}
3529#else
3530static int io_sqe_files_scm(struct io_ring_ctx *ctx)
3531{
3532 return 0;
3533}
3534#endif
3535
65e19f54
JA
3536static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
3537 unsigned nr_files)
3538{
3539 int i;
3540
3541 for (i = 0; i < nr_tables; i++) {
3542 struct fixed_file_table *table = &ctx->file_table[i];
3543 unsigned this_files;
3544
3545 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
3546 table->files = kcalloc(this_files, sizeof(struct file *),
3547 GFP_KERNEL);
3548 if (!table->files)
3549 break;
3550 nr_files -= this_files;
3551 }
3552
3553 if (i == nr_tables)
3554 return 0;
3555
3556 for (i = 0; i < nr_tables; i++) {
3557 struct fixed_file_table *table = &ctx->file_table[i];
3558 kfree(table->files);
3559 }
3560 return 1;
3561}
3562
6b06314c
JA
3563static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
3564 unsigned nr_args)
3565{
3566 __s32 __user *fds = (__s32 __user *) arg;
65e19f54 3567 unsigned nr_tables;
6b06314c
JA
3568 int fd, ret = 0;
3569 unsigned i;
3570
65e19f54 3571 if (ctx->file_table)
6b06314c
JA
3572 return -EBUSY;
3573 if (!nr_args)
3574 return -EINVAL;
3575 if (nr_args > IORING_MAX_FIXED_FILES)
3576 return -EMFILE;
3577
65e19f54
JA
3578 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
3579 ctx->file_table = kcalloc(nr_tables, sizeof(struct fixed_file_table),
3580 GFP_KERNEL);
3581 if (!ctx->file_table)
6b06314c
JA
3582 return -ENOMEM;
3583
65e19f54
JA
3584 if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
3585 kfree(ctx->file_table);
46568e9b 3586 ctx->file_table = NULL;
65e19f54
JA
3587 return -ENOMEM;
3588 }
3589
08a45173 3590 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
65e19f54
JA
3591 struct fixed_file_table *table;
3592 unsigned index;
3593
6b06314c
JA
3594 ret = -EFAULT;
3595 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
3596 break;
08a45173
JA
3597 /* allow sparse sets */
3598 if (fd == -1) {
3599 ret = 0;
3600 continue;
3601 }
6b06314c 3602
65e19f54
JA
3603 table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
3604 index = i & IORING_FILE_TABLE_MASK;
3605 table->files[index] = fget(fd);
6b06314c
JA
3606
3607 ret = -EBADF;
65e19f54 3608 if (!table->files[index])
6b06314c
JA
3609 break;
3610 /*
3611 * Don't allow io_uring instances to be registered. If UNIX
3612 * isn't enabled, then this causes a reference cycle and this
3613 * instance can never get freed. If UNIX is enabled we'll
3614 * handle it just fine, but there's still no point in allowing
3615 * a ring fd as it doesn't support regular read/write anyway.
3616 */
65e19f54
JA
3617 if (table->files[index]->f_op == &io_uring_fops) {
3618 fput(table->files[index]);
6b06314c
JA
3619 break;
3620 }
6b06314c
JA
3621 ret = 0;
3622 }
3623
3624 if (ret) {
65e19f54
JA
3625 for (i = 0; i < ctx->nr_user_files; i++) {
3626 struct file *file;
6b06314c 3627
65e19f54
JA
3628 file = io_file_from_index(ctx, i);
3629 if (file)
3630 fput(file);
3631 }
3632 for (i = 0; i < nr_tables; i++)
3633 kfree(ctx->file_table[i].files);
3634
3635 kfree(ctx->file_table);
3636 ctx->file_table = NULL;
6b06314c
JA
3637 ctx->nr_user_files = 0;
3638 return ret;
3639 }
3640
3641 ret = io_sqe_files_scm(ctx);
3642 if (ret)
3643 io_sqe_files_unregister(ctx);
3644
3645 return ret;
3646}
3647
c3a31e60
JA
3648static void io_sqe_file_unregister(struct io_ring_ctx *ctx, int index)
3649{
3650#if defined(CONFIG_UNIX)
65e19f54 3651 struct file *file = io_file_from_index(ctx, index);
c3a31e60
JA
3652 struct sock *sock = ctx->ring_sock->sk;
3653 struct sk_buff_head list, *head = &sock->sk_receive_queue;
3654 struct sk_buff *skb;
3655 int i;
3656
3657 __skb_queue_head_init(&list);
3658
3659 /*
3660 * Find the skb that holds this file in its SCM_RIGHTS. When found,
3661 * remove this entry and rearrange the file array.
3662 */
3663 skb = skb_dequeue(head);
3664 while (skb) {
3665 struct scm_fp_list *fp;
3666
3667 fp = UNIXCB(skb).fp;
3668 for (i = 0; i < fp->count; i++) {
3669 int left;
3670
3671 if (fp->fp[i] != file)
3672 continue;
3673
3674 unix_notinflight(fp->user, fp->fp[i]);
3675 left = fp->count - 1 - i;
3676 if (left) {
3677 memmove(&fp->fp[i], &fp->fp[i + 1],
3678 left * sizeof(struct file *));
3679 }
3680 fp->count--;
3681 if (!fp->count) {
3682 kfree_skb(skb);
3683 skb = NULL;
3684 } else {
3685 __skb_queue_tail(&list, skb);
3686 }
3687 fput(file);
3688 file = NULL;
3689 break;
3690 }
3691
3692 if (!file)
3693 break;
3694
3695 __skb_queue_tail(&list, skb);
3696
3697 skb = skb_dequeue(head);
3698 }
3699
3700 if (skb_peek(&list)) {
3701 spin_lock_irq(&head->lock);
3702 while ((skb = __skb_dequeue(&list)) != NULL)
3703 __skb_queue_tail(head, skb);
3704 spin_unlock_irq(&head->lock);
3705 }
3706#else
65e19f54 3707 fput(io_file_from_index(ctx, index));
c3a31e60
JA
3708#endif
3709}
3710
3711static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
3712 int index)
3713{
3714#if defined(CONFIG_UNIX)
3715 struct sock *sock = ctx->ring_sock->sk;
3716 struct sk_buff_head *head = &sock->sk_receive_queue;
3717 struct sk_buff *skb;
3718
3719 /*
3720 * See if we can merge this file into an existing skb SCM_RIGHTS
3721 * file set. If there's no room, fall back to allocating a new skb
3722 * and filling it in.
3723 */
3724 spin_lock_irq(&head->lock);
3725 skb = skb_peek(head);
3726 if (skb) {
3727 struct scm_fp_list *fpl = UNIXCB(skb).fp;
3728
3729 if (fpl->count < SCM_MAX_FD) {
3730 __skb_unlink(skb, head);
3731 spin_unlock_irq(&head->lock);
3732 fpl->fp[fpl->count] = get_file(file);
3733 unix_inflight(fpl->user, fpl->fp[fpl->count]);
3734 fpl->count++;
3735 spin_lock_irq(&head->lock);
3736 __skb_queue_head(head, skb);
3737 } else {
3738 skb = NULL;
3739 }
3740 }
3741 spin_unlock_irq(&head->lock);
3742
3743 if (skb) {
3744 fput(file);
3745 return 0;
3746 }
3747
3748 return __io_sqe_files_scm(ctx, 1, index);
3749#else
3750 return 0;
3751#endif
3752}
3753
3754static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
3755 unsigned nr_args)
3756{
3757 struct io_uring_files_update up;
3758 __s32 __user *fds;
3759 int fd, i, err;
3760 __u32 done;
3761
65e19f54 3762 if (!ctx->file_table)
c3a31e60
JA
3763 return -ENXIO;
3764 if (!nr_args)
3765 return -EINVAL;
3766 if (copy_from_user(&up, arg, sizeof(up)))
3767 return -EFAULT;
3768 if (check_add_overflow(up.offset, nr_args, &done))
3769 return -EOVERFLOW;
3770 if (done > ctx->nr_user_files)
3771 return -EINVAL;
3772
3773 done = 0;
3774 fds = (__s32 __user *) up.fds;
3775 while (nr_args) {
65e19f54
JA
3776 struct fixed_file_table *table;
3777 unsigned index;
3778
c3a31e60
JA
3779 err = 0;
3780 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
3781 err = -EFAULT;
3782 break;
3783 }
3784 i = array_index_nospec(up.offset, ctx->nr_user_files);
65e19f54
JA
3785 table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
3786 index = i & IORING_FILE_TABLE_MASK;
3787 if (table->files[index]) {
c3a31e60 3788 io_sqe_file_unregister(ctx, i);
65e19f54 3789 table->files[index] = NULL;
c3a31e60
JA
3790 }
3791 if (fd != -1) {
3792 struct file *file;
3793
3794 file = fget(fd);
3795 if (!file) {
3796 err = -EBADF;
3797 break;
3798 }
3799 /*
3800 * Don't allow io_uring instances to be registered. If
3801 * UNIX isn't enabled, then this causes a reference
3802 * cycle and this instance can never get freed. If UNIX
3803 * is enabled we'll handle it just fine, but there's
3804 * still no point in allowing a ring fd as it doesn't
3805 * support regular read/write anyway.
3806 */
3807 if (file->f_op == &io_uring_fops) {
3808 fput(file);
3809 err = -EBADF;
3810 break;
3811 }
65e19f54 3812 table->files[index] = file;
c3a31e60
JA
3813 err = io_sqe_file_register(ctx, file, i);
3814 if (err)
3815 break;
3816 }
3817 nr_args--;
3818 done++;
3819 up.offset++;
3820 }
3821
3822 return done ? done : err;
3823}
3824
6c271ce2
JA
3825static int io_sq_offload_start(struct io_ring_ctx *ctx,
3826 struct io_uring_params *p)
2b188cc1 3827{
561fb04a 3828 unsigned concurrency;
2b188cc1
JA
3829 int ret;
3830
6c271ce2 3831 init_waitqueue_head(&ctx->sqo_wait);
2b188cc1
JA
3832 mmgrab(current->mm);
3833 ctx->sqo_mm = current->mm;
3834
6c271ce2 3835 if (ctx->flags & IORING_SETUP_SQPOLL) {
3ec482d1
JA
3836 ret = -EPERM;
3837 if (!capable(CAP_SYS_ADMIN))
3838 goto err;
3839
917257da
JA
3840 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
3841 if (!ctx->sq_thread_idle)
3842 ctx->sq_thread_idle = HZ;
3843
6c271ce2 3844 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 3845 int cpu = p->sq_thread_cpu;
6c271ce2 3846
917257da 3847 ret = -EINVAL;
44a9bd18
JA
3848 if (cpu >= nr_cpu_ids)
3849 goto err;
7889f44d 3850 if (!cpu_online(cpu))
917257da
JA
3851 goto err;
3852
6c271ce2
JA
3853 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
3854 ctx, cpu,
3855 "io_uring-sq");
3856 } else {
3857 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
3858 "io_uring-sq");
3859 }
3860 if (IS_ERR(ctx->sqo_thread)) {
3861 ret = PTR_ERR(ctx->sqo_thread);
3862 ctx->sqo_thread = NULL;
3863 goto err;
3864 }
3865 wake_up_process(ctx->sqo_thread);
3866 } else if (p->flags & IORING_SETUP_SQ_AFF) {
3867 /* Can't have SQ_AFF without SQPOLL */
3868 ret = -EINVAL;
3869 goto err;
3870 }
3871
561fb04a
JA
3872 /* Do QD, or 4 * CPUS, whatever is smallest */
3873 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
5f8fd2d3 3874 ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, ctx->user);
975c99a5
JA
3875 if (IS_ERR(ctx->io_wq)) {
3876 ret = PTR_ERR(ctx->io_wq);
3877 ctx->io_wq = NULL;
2b188cc1
JA
3878 goto err;
3879 }
3880
3881 return 0;
3882err:
54a91f3b 3883 io_finish_async(ctx);
2b188cc1
JA
3884 mmdrop(ctx->sqo_mm);
3885 ctx->sqo_mm = NULL;
3886 return ret;
3887}
3888
3889static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
3890{
3891 atomic_long_sub(nr_pages, &user->locked_vm);
3892}
3893
3894static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
3895{
3896 unsigned long page_limit, cur_pages, new_pages;
3897
3898 /* Don't allow more pages than we can safely lock */
3899 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
3900
3901 do {
3902 cur_pages = atomic_long_read(&user->locked_vm);
3903 new_pages = cur_pages + nr_pages;
3904 if (new_pages > page_limit)
3905 return -ENOMEM;
3906 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
3907 new_pages) != cur_pages);
3908
3909 return 0;
3910}
3911
3912static void io_mem_free(void *ptr)
3913{
52e04ef4
MR
3914 struct page *page;
3915
3916 if (!ptr)
3917 return;
2b188cc1 3918
52e04ef4 3919 page = virt_to_head_page(ptr);
2b188cc1
JA
3920 if (put_page_testzero(page))
3921 free_compound_page(page);
3922}
3923
3924static void *io_mem_alloc(size_t size)
3925{
3926 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
3927 __GFP_NORETRY;
3928
3929 return (void *) __get_free_pages(gfp_flags, get_order(size));
3930}
3931
75b28aff
HV
3932static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
3933 size_t *sq_offset)
3934{
3935 struct io_rings *rings;
3936 size_t off, sq_array_size;
3937
3938 off = struct_size(rings, cqes, cq_entries);
3939 if (off == SIZE_MAX)
3940 return SIZE_MAX;
3941
3942#ifdef CONFIG_SMP
3943 off = ALIGN(off, SMP_CACHE_BYTES);
3944 if (off == 0)
3945 return SIZE_MAX;
3946#endif
3947
3948 sq_array_size = array_size(sizeof(u32), sq_entries);
3949 if (sq_array_size == SIZE_MAX)
3950 return SIZE_MAX;
3951
3952 if (check_add_overflow(off, sq_array_size, &off))
3953 return SIZE_MAX;
3954
3955 if (sq_offset)
3956 *sq_offset = off;
3957
3958 return off;
3959}
3960
2b188cc1
JA
3961static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
3962{
75b28aff 3963 size_t pages;
2b188cc1 3964
75b28aff
HV
3965 pages = (size_t)1 << get_order(
3966 rings_size(sq_entries, cq_entries, NULL));
3967 pages += (size_t)1 << get_order(
3968 array_size(sizeof(struct io_uring_sqe), sq_entries));
2b188cc1 3969
75b28aff 3970 return pages;
2b188cc1
JA
3971}
3972
edafccee
JA
3973static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
3974{
3975 int i, j;
3976
3977 if (!ctx->user_bufs)
3978 return -ENXIO;
3979
3980 for (i = 0; i < ctx->nr_user_bufs; i++) {
3981 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3982
3983 for (j = 0; j < imu->nr_bvecs; j++)
27c4d3a3 3984 put_user_page(imu->bvec[j].bv_page);
edafccee
JA
3985
3986 if (ctx->account_mem)
3987 io_unaccount_mem(ctx->user, imu->nr_bvecs);
d4ef6475 3988 kvfree(imu->bvec);
edafccee
JA
3989 imu->nr_bvecs = 0;
3990 }
3991
3992 kfree(ctx->user_bufs);
3993 ctx->user_bufs = NULL;
3994 ctx->nr_user_bufs = 0;
3995 return 0;
3996}
3997
3998static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
3999 void __user *arg, unsigned index)
4000{
4001 struct iovec __user *src;
4002
4003#ifdef CONFIG_COMPAT
4004 if (ctx->compat) {
4005 struct compat_iovec __user *ciovs;
4006 struct compat_iovec ciov;
4007
4008 ciovs = (struct compat_iovec __user *) arg;
4009 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
4010 return -EFAULT;
4011
4012 dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
4013 dst->iov_len = ciov.iov_len;
4014 return 0;
4015 }
4016#endif
4017 src = (struct iovec __user *) arg;
4018 if (copy_from_user(dst, &src[index], sizeof(*dst)))
4019 return -EFAULT;
4020 return 0;
4021}
4022
4023static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
4024 unsigned nr_args)
4025{
4026 struct vm_area_struct **vmas = NULL;
4027 struct page **pages = NULL;
4028 int i, j, got_pages = 0;
4029 int ret = -EINVAL;
4030
4031 if (ctx->user_bufs)
4032 return -EBUSY;
4033 if (!nr_args || nr_args > UIO_MAXIOV)
4034 return -EINVAL;
4035
4036 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
4037 GFP_KERNEL);
4038 if (!ctx->user_bufs)
4039 return -ENOMEM;
4040
4041 for (i = 0; i < nr_args; i++) {
4042 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
4043 unsigned long off, start, end, ubuf;
4044 int pret, nr_pages;
4045 struct iovec iov;
4046 size_t size;
4047
4048 ret = io_copy_iov(ctx, &iov, arg, i);
4049 if (ret)
a278682d 4050 goto err;
edafccee
JA
4051
4052 /*
4053 * Don't impose further limits on the size and buffer
4054 * constraints here, we'll -EINVAL later when IO is
4055 * submitted if they are wrong.
4056 */
4057 ret = -EFAULT;
4058 if (!iov.iov_base || !iov.iov_len)
4059 goto err;
4060
4061 /* arbitrary limit, but we need something */
4062 if (iov.iov_len > SZ_1G)
4063 goto err;
4064
4065 ubuf = (unsigned long) iov.iov_base;
4066 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
4067 start = ubuf >> PAGE_SHIFT;
4068 nr_pages = end - start;
4069
4070 if (ctx->account_mem) {
4071 ret = io_account_mem(ctx->user, nr_pages);
4072 if (ret)
4073 goto err;
4074 }
4075
4076 ret = 0;
4077 if (!pages || nr_pages > got_pages) {
4078 kfree(vmas);
4079 kfree(pages);
d4ef6475 4080 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
edafccee 4081 GFP_KERNEL);
d4ef6475 4082 vmas = kvmalloc_array(nr_pages,
edafccee
JA
4083 sizeof(struct vm_area_struct *),
4084 GFP_KERNEL);
4085 if (!pages || !vmas) {
4086 ret = -ENOMEM;
4087 if (ctx->account_mem)
4088 io_unaccount_mem(ctx->user, nr_pages);
4089 goto err;
4090 }
4091 got_pages = nr_pages;
4092 }
4093
d4ef6475 4094 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
edafccee
JA
4095 GFP_KERNEL);
4096 ret = -ENOMEM;
4097 if (!imu->bvec) {
4098 if (ctx->account_mem)
4099 io_unaccount_mem(ctx->user, nr_pages);
4100 goto err;
4101 }
4102
4103 ret = 0;
4104 down_read(&current->mm->mmap_sem);
932f4a63
IW
4105 pret = get_user_pages(ubuf, nr_pages,
4106 FOLL_WRITE | FOLL_LONGTERM,
4107 pages, vmas);
edafccee
JA
4108 if (pret == nr_pages) {
4109 /* don't support file backed memory */
4110 for (j = 0; j < nr_pages; j++) {
4111 struct vm_area_struct *vma = vmas[j];
4112
4113 if (vma->vm_file &&
4114 !is_file_hugepages(vma->vm_file)) {
4115 ret = -EOPNOTSUPP;
4116 break;
4117 }
4118 }
4119 } else {
4120 ret = pret < 0 ? pret : -EFAULT;
4121 }
4122 up_read(&current->mm->mmap_sem);
4123 if (ret) {
4124 /*
4125 * if we did partial map, or found file backed vmas,
4126 * release any pages we did get
4127 */
27c4d3a3
JH
4128 if (pret > 0)
4129 put_user_pages(pages, pret);
edafccee
JA
4130 if (ctx->account_mem)
4131 io_unaccount_mem(ctx->user, nr_pages);
d4ef6475 4132 kvfree(imu->bvec);
edafccee
JA
4133 goto err;
4134 }
4135
4136 off = ubuf & ~PAGE_MASK;
4137 size = iov.iov_len;
4138 for (j = 0; j < nr_pages; j++) {
4139 size_t vec_len;
4140
4141 vec_len = min_t(size_t, size, PAGE_SIZE - off);
4142 imu->bvec[j].bv_page = pages[j];
4143 imu->bvec[j].bv_len = vec_len;
4144 imu->bvec[j].bv_offset = off;
4145 off = 0;
4146 size -= vec_len;
4147 }
4148 /* store original address for later verification */
4149 imu->ubuf = ubuf;
4150 imu->len = iov.iov_len;
4151 imu->nr_bvecs = nr_pages;
4152
4153 ctx->nr_user_bufs++;
4154 }
d4ef6475
MR
4155 kvfree(pages);
4156 kvfree(vmas);
edafccee
JA
4157 return 0;
4158err:
d4ef6475
MR
4159 kvfree(pages);
4160 kvfree(vmas);
edafccee
JA
4161 io_sqe_buffer_unregister(ctx);
4162 return ret;
4163}
4164
9b402849
JA
4165static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
4166{
4167 __s32 __user *fds = arg;
4168 int fd;
4169
4170 if (ctx->cq_ev_fd)
4171 return -EBUSY;
4172
4173 if (copy_from_user(&fd, fds, sizeof(*fds)))
4174 return -EFAULT;
4175
4176 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
4177 if (IS_ERR(ctx->cq_ev_fd)) {
4178 int ret = PTR_ERR(ctx->cq_ev_fd);
4179 ctx->cq_ev_fd = NULL;
4180 return ret;
4181 }
4182
4183 return 0;
4184}
4185
4186static int io_eventfd_unregister(struct io_ring_ctx *ctx)
4187{
4188 if (ctx->cq_ev_fd) {
4189 eventfd_ctx_put(ctx->cq_ev_fd);
4190 ctx->cq_ev_fd = NULL;
4191 return 0;
4192 }
4193
4194 return -ENXIO;
4195}
4196
2b188cc1
JA
4197static void io_ring_ctx_free(struct io_ring_ctx *ctx)
4198{
6b06314c 4199 io_finish_async(ctx);
2b188cc1
JA
4200 if (ctx->sqo_mm)
4201 mmdrop(ctx->sqo_mm);
def596e9
JA
4202
4203 io_iopoll_reap_events(ctx);
edafccee 4204 io_sqe_buffer_unregister(ctx);
6b06314c 4205 io_sqe_files_unregister(ctx);
9b402849 4206 io_eventfd_unregister(ctx);
def596e9 4207
2b188cc1 4208#if defined(CONFIG_UNIX)
355e8d26
EB
4209 if (ctx->ring_sock) {
4210 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 4211 sock_release(ctx->ring_sock);
355e8d26 4212 }
2b188cc1
JA
4213#endif
4214
75b28aff 4215 io_mem_free(ctx->rings);
2b188cc1 4216 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
4217
4218 percpu_ref_exit(&ctx->refs);
4219 if (ctx->account_mem)
4220 io_unaccount_mem(ctx->user,
4221 ring_pages(ctx->sq_entries, ctx->cq_entries));
4222 free_uid(ctx->user);
206aefde 4223 kfree(ctx->completions);
0ddf92e8 4224 kmem_cache_free(req_cachep, ctx->fallback_req);
2b188cc1
JA
4225 kfree(ctx);
4226}
4227
4228static __poll_t io_uring_poll(struct file *file, poll_table *wait)
4229{
4230 struct io_ring_ctx *ctx = file->private_data;
4231 __poll_t mask = 0;
4232
4233 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
4234 /*
4235 * synchronizes with barrier from wq_has_sleeper call in
4236 * io_commit_cqring
4237 */
2b188cc1 4238 smp_rmb();
75b28aff
HV
4239 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
4240 ctx->rings->sq_ring_entries)
2b188cc1 4241 mask |= EPOLLOUT | EPOLLWRNORM;
daa5de54 4242 if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
2b188cc1
JA
4243 mask |= EPOLLIN | EPOLLRDNORM;
4244
4245 return mask;
4246}
4247
4248static int io_uring_fasync(int fd, struct file *file, int on)
4249{
4250 struct io_ring_ctx *ctx = file->private_data;
4251
4252 return fasync_helper(fd, file, on, &ctx->cq_fasync);
4253}
4254
4255static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
4256{
4257 mutex_lock(&ctx->uring_lock);
4258 percpu_ref_kill(&ctx->refs);
4259 mutex_unlock(&ctx->uring_lock);
4260
5262f567 4261 io_kill_timeouts(ctx);
221c5eb2 4262 io_poll_remove_all(ctx);
561fb04a
JA
4263
4264 if (ctx->io_wq)
4265 io_wq_cancel_all(ctx->io_wq);
4266
def596e9 4267 io_iopoll_reap_events(ctx);
1d7bb1d5 4268 io_cqring_overflow_flush(ctx, true);
206aefde 4269 wait_for_completion(&ctx->completions[0]);
2b188cc1
JA
4270 io_ring_ctx_free(ctx);
4271}
4272
4273static int io_uring_release(struct inode *inode, struct file *file)
4274{
4275 struct io_ring_ctx *ctx = file->private_data;
4276
4277 file->private_data = NULL;
4278 io_ring_ctx_wait_and_kill(ctx);
4279 return 0;
4280}
4281
fcb323cc
JA
4282static void io_uring_cancel_files(struct io_ring_ctx *ctx,
4283 struct files_struct *files)
4284{
4285 struct io_kiocb *req;
4286 DEFINE_WAIT(wait);
4287
4288 while (!list_empty_careful(&ctx->inflight_list)) {
4289 enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
768134d4 4290 struct io_kiocb *cancel_req = NULL;
fcb323cc
JA
4291
4292 spin_lock_irq(&ctx->inflight_lock);
4293 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
768134d4
JA
4294 if (req->work.files != files)
4295 continue;
4296 /* req is being completed, ignore */
4297 if (!refcount_inc_not_zero(&req->refs))
4298 continue;
4299 cancel_req = req;
4300 break;
fcb323cc 4301 }
768134d4 4302 if (cancel_req)
fcb323cc 4303 prepare_to_wait(&ctx->inflight_wait, &wait,
768134d4 4304 TASK_UNINTERRUPTIBLE);
fcb323cc
JA
4305 spin_unlock_irq(&ctx->inflight_lock);
4306
768134d4
JA
4307 if (cancel_req) {
4308 ret = io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
4309 io_put_req(cancel_req);
4310 }
4311
4312 /* We need to keep going until we don't find a matching req */
4313 if (!cancel_req)
fcb323cc
JA
4314 break;
4315 schedule();
4316 }
768134d4 4317 finish_wait(&ctx->inflight_wait, &wait);
fcb323cc
JA
4318}
4319
4320static int io_uring_flush(struct file *file, void *data)
4321{
4322 struct io_ring_ctx *ctx = file->private_data;
4323
4324 io_uring_cancel_files(ctx, data);
1d7bb1d5
JA
4325 if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
4326 io_cqring_overflow_flush(ctx, true);
fcb323cc 4327 io_wq_cancel_all(ctx->io_wq);
1d7bb1d5 4328 }
fcb323cc
JA
4329 return 0;
4330}
4331
2b188cc1
JA
4332static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
4333{
4334 loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
4335 unsigned long sz = vma->vm_end - vma->vm_start;
4336 struct io_ring_ctx *ctx = file->private_data;
4337 unsigned long pfn;
4338 struct page *page;
4339 void *ptr;
4340
4341 switch (offset) {
4342 case IORING_OFF_SQ_RING:
75b28aff
HV
4343 case IORING_OFF_CQ_RING:
4344 ptr = ctx->rings;
2b188cc1
JA
4345 break;
4346 case IORING_OFF_SQES:
4347 ptr = ctx->sq_sqes;
4348 break;
2b188cc1
JA
4349 default:
4350 return -EINVAL;
4351 }
4352
4353 page = virt_to_head_page(ptr);
a50b854e 4354 if (sz > page_size(page))
2b188cc1
JA
4355 return -EINVAL;
4356
4357 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
4358 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
4359}
4360
4361SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
4362 u32, min_complete, u32, flags, const sigset_t __user *, sig,
4363 size_t, sigsz)
4364{
4365 struct io_ring_ctx *ctx;
4366 long ret = -EBADF;
4367 int submitted = 0;
4368 struct fd f;
4369
6c271ce2 4370 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
2b188cc1
JA
4371 return -EINVAL;
4372
4373 f = fdget(fd);
4374 if (!f.file)
4375 return -EBADF;
4376
4377 ret = -EOPNOTSUPP;
4378 if (f.file->f_op != &io_uring_fops)
4379 goto out_fput;
4380
4381 ret = -ENXIO;
4382 ctx = f.file->private_data;
4383 if (!percpu_ref_tryget(&ctx->refs))
4384 goto out_fput;
4385
6c271ce2
JA
4386 /*
4387 * For SQ polling, the thread will do all submissions and completions.
4388 * Just return the requested submit count, and wake the thread if
4389 * we were asked to.
4390 */
b2a9eada 4391 ret = 0;
6c271ce2 4392 if (ctx->flags & IORING_SETUP_SQPOLL) {
c1edbf5f
JA
4393 if (!list_empty_careful(&ctx->cq_overflow_list))
4394 io_cqring_overflow_flush(ctx, false);
6c271ce2
JA
4395 if (flags & IORING_ENTER_SQ_WAKEUP)
4396 wake_up(&ctx->sqo_wait);
4397 submitted = to_submit;
b2a9eada 4398 } else if (to_submit) {
ae9428ca 4399 struct mm_struct *cur_mm;
2b188cc1 4400
ae9428ca 4401 to_submit = min(to_submit, ctx->sq_entries);
2b188cc1 4402 mutex_lock(&ctx->uring_lock);
ae9428ca
PB
4403 /* already have mm, so io_submit_sqes() won't try to grab it */
4404 cur_mm = ctx->sqo_mm;
4405 submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
4406 &cur_mm, false);
2b188cc1 4407 mutex_unlock(&ctx->uring_lock);
2b188cc1
JA
4408 }
4409 if (flags & IORING_ENTER_GETEVENTS) {
def596e9
JA
4410 unsigned nr_events = 0;
4411
2b188cc1
JA
4412 min_complete = min(min_complete, ctx->cq_entries);
4413
def596e9 4414 if (ctx->flags & IORING_SETUP_IOPOLL) {
def596e9 4415 ret = io_iopoll_check(ctx, &nr_events, min_complete);
def596e9
JA
4416 } else {
4417 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
4418 }
2b188cc1
JA
4419 }
4420
6805b32e 4421 percpu_ref_put(&ctx->refs);
2b188cc1
JA
4422out_fput:
4423 fdput(f);
4424 return submitted ? submitted : ret;
4425}
4426
4427static const struct file_operations io_uring_fops = {
4428 .release = io_uring_release,
fcb323cc 4429 .flush = io_uring_flush,
2b188cc1
JA
4430 .mmap = io_uring_mmap,
4431 .poll = io_uring_poll,
4432 .fasync = io_uring_fasync,
4433};
4434
4435static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
4436 struct io_uring_params *p)
4437{
75b28aff
HV
4438 struct io_rings *rings;
4439 size_t size, sq_array_offset;
2b188cc1 4440
75b28aff
HV
4441 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
4442 if (size == SIZE_MAX)
4443 return -EOVERFLOW;
4444
4445 rings = io_mem_alloc(size);
4446 if (!rings)
2b188cc1
JA
4447 return -ENOMEM;
4448
75b28aff
HV
4449 ctx->rings = rings;
4450 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
4451 rings->sq_ring_mask = p->sq_entries - 1;
4452 rings->cq_ring_mask = p->cq_entries - 1;
4453 rings->sq_ring_entries = p->sq_entries;
4454 rings->cq_ring_entries = p->cq_entries;
4455 ctx->sq_mask = rings->sq_ring_mask;
4456 ctx->cq_mask = rings->cq_ring_mask;
4457 ctx->sq_entries = rings->sq_ring_entries;
4458 ctx->cq_entries = rings->cq_ring_entries;
2b188cc1
JA
4459
4460 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
4461 if (size == SIZE_MAX)
4462 return -EOVERFLOW;
4463
4464 ctx->sq_sqes = io_mem_alloc(size);
52e04ef4 4465 if (!ctx->sq_sqes)
2b188cc1 4466 return -ENOMEM;
2b188cc1 4467
2b188cc1
JA
4468 return 0;
4469}
4470
4471/*
4472 * Allocate an anonymous fd, this is what constitutes the application
4473 * visible backing of an io_uring instance. The application mmaps this
4474 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
4475 * we have to tie this fd to a socket for file garbage collection purposes.
4476 */
4477static int io_uring_get_fd(struct io_ring_ctx *ctx)
4478{
4479 struct file *file;
4480 int ret;
4481
4482#if defined(CONFIG_UNIX)
4483 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
4484 &ctx->ring_sock);
4485 if (ret)
4486 return ret;
4487#endif
4488
4489 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
4490 if (ret < 0)
4491 goto err;
4492
4493 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
4494 O_RDWR | O_CLOEXEC);
4495 if (IS_ERR(file)) {
4496 put_unused_fd(ret);
4497 ret = PTR_ERR(file);
4498 goto err;
4499 }
4500
4501#if defined(CONFIG_UNIX)
4502 ctx->ring_sock->file = file;
6b06314c 4503 ctx->ring_sock->sk->sk_user_data = ctx;
2b188cc1
JA
4504#endif
4505 fd_install(ret, file);
4506 return ret;
4507err:
4508#if defined(CONFIG_UNIX)
4509 sock_release(ctx->ring_sock);
4510 ctx->ring_sock = NULL;
4511#endif
4512 return ret;
4513}
4514
4515static int io_uring_create(unsigned entries, struct io_uring_params *p)
4516{
4517 struct user_struct *user = NULL;
4518 struct io_ring_ctx *ctx;
4519 bool account_mem;
4520 int ret;
4521
4522 if (!entries || entries > IORING_MAX_ENTRIES)
4523 return -EINVAL;
4524
4525 /*
4526 * Use twice as many entries for the CQ ring. It's possible for the
4527 * application to drive a higher depth than the size of the SQ ring,
4528 * since the sqes are only used at submission time. This allows for
33a107f0
JA
4529 * some flexibility in overcommitting a bit. If the application has
4530 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
4531 * of CQ ring entries manually.
2b188cc1
JA
4532 */
4533 p->sq_entries = roundup_pow_of_two(entries);
33a107f0
JA
4534 if (p->flags & IORING_SETUP_CQSIZE) {
4535 /*
4536 * If IORING_SETUP_CQSIZE is set, we do the same roundup
4537 * to a power-of-two, if it isn't already. We do NOT impose
4538 * any cq vs sq ring sizing.
4539 */
4540 if (p->cq_entries < p->sq_entries || p->cq_entries > IORING_MAX_CQ_ENTRIES)
4541 return -EINVAL;
4542 p->cq_entries = roundup_pow_of_two(p->cq_entries);
4543 } else {
4544 p->cq_entries = 2 * p->sq_entries;
4545 }
2b188cc1
JA
4546
4547 user = get_uid(current_user());
4548 account_mem = !capable(CAP_IPC_LOCK);
4549
4550 if (account_mem) {
4551 ret = io_account_mem(user,
4552 ring_pages(p->sq_entries, p->cq_entries));
4553 if (ret) {
4554 free_uid(user);
4555 return ret;
4556 }
4557 }
4558
4559 ctx = io_ring_ctx_alloc(p);
4560 if (!ctx) {
4561 if (account_mem)
4562 io_unaccount_mem(user, ring_pages(p->sq_entries,
4563 p->cq_entries));
4564 free_uid(user);
4565 return -ENOMEM;
4566 }
4567 ctx->compat = in_compat_syscall();
4568 ctx->account_mem = account_mem;
4569 ctx->user = user;
4570
4571 ret = io_allocate_scq_urings(ctx, p);
4572 if (ret)
4573 goto err;
4574
6c271ce2 4575 ret = io_sq_offload_start(ctx, p);
2b188cc1
JA
4576 if (ret)
4577 goto err;
4578
2b188cc1 4579 memset(&p->sq_off, 0, sizeof(p->sq_off));
75b28aff
HV
4580 p->sq_off.head = offsetof(struct io_rings, sq.head);
4581 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
4582 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
4583 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
4584 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
4585 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
4586 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
2b188cc1
JA
4587
4588 memset(&p->cq_off, 0, sizeof(p->cq_off));
75b28aff
HV
4589 p->cq_off.head = offsetof(struct io_rings, cq.head);
4590 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
4591 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
4592 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
4593 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
4594 p->cq_off.cqes = offsetof(struct io_rings, cqes);
ac90f249 4595
044c1ab3
JA
4596 /*
4597 * Install ring fd as the very last thing, so we don't risk someone
4598 * having closed it before we finish setup
4599 */
4600 ret = io_uring_get_fd(ctx);
4601 if (ret < 0)
4602 goto err;
4603
1d7bb1d5 4604 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP;
c826bd7a 4605 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
2b188cc1
JA
4606 return ret;
4607err:
4608 io_ring_ctx_wait_and_kill(ctx);
4609 return ret;
4610}
4611
4612/*
4613 * Sets up an aio uring context, and returns the fd. Applications asks for a
4614 * ring size, we return the actual sq/cq ring sizes (among other things) in the
4615 * params structure passed in.
4616 */
4617static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
4618{
4619 struct io_uring_params p;
4620 long ret;
4621 int i;
4622
4623 if (copy_from_user(&p, params, sizeof(p)))
4624 return -EFAULT;
4625 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
4626 if (p.resv[i])
4627 return -EINVAL;
4628 }
4629
6c271ce2 4630 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
33a107f0 4631 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE))
2b188cc1
JA
4632 return -EINVAL;
4633
4634 ret = io_uring_create(entries, &p);
4635 if (ret < 0)
4636 return ret;
4637
4638 if (copy_to_user(params, &p, sizeof(p)))
4639 return -EFAULT;
4640
4641 return ret;
4642}
4643
4644SYSCALL_DEFINE2(io_uring_setup, u32, entries,
4645 struct io_uring_params __user *, params)
4646{
4647 return io_uring_setup(entries, params);
4648}
4649
edafccee
JA
4650static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
4651 void __user *arg, unsigned nr_args)
b19062a5
JA
4652 __releases(ctx->uring_lock)
4653 __acquires(ctx->uring_lock)
edafccee
JA
4654{
4655 int ret;
4656
35fa71a0
JA
4657 /*
4658 * We're inside the ring mutex, if the ref is already dying, then
4659 * someone else killed the ctx or is already going through
4660 * io_uring_register().
4661 */
4662 if (percpu_ref_is_dying(&ctx->refs))
4663 return -ENXIO;
4664
edafccee 4665 percpu_ref_kill(&ctx->refs);
b19062a5
JA
4666
4667 /*
4668 * Drop uring mutex before waiting for references to exit. If another
4669 * thread is currently inside io_uring_enter() it might need to grab
4670 * the uring_lock to make progress. If we hold it here across the drain
4671 * wait, then we can deadlock. It's safe to drop the mutex here, since
4672 * no new references will come in after we've killed the percpu ref.
4673 */
4674 mutex_unlock(&ctx->uring_lock);
206aefde 4675 wait_for_completion(&ctx->completions[0]);
b19062a5 4676 mutex_lock(&ctx->uring_lock);
edafccee
JA
4677
4678 switch (opcode) {
4679 case IORING_REGISTER_BUFFERS:
4680 ret = io_sqe_buffer_register(ctx, arg, nr_args);
4681 break;
4682 case IORING_UNREGISTER_BUFFERS:
4683 ret = -EINVAL;
4684 if (arg || nr_args)
4685 break;
4686 ret = io_sqe_buffer_unregister(ctx);
4687 break;
6b06314c
JA
4688 case IORING_REGISTER_FILES:
4689 ret = io_sqe_files_register(ctx, arg, nr_args);
4690 break;
4691 case IORING_UNREGISTER_FILES:
4692 ret = -EINVAL;
4693 if (arg || nr_args)
4694 break;
4695 ret = io_sqe_files_unregister(ctx);
4696 break;
c3a31e60
JA
4697 case IORING_REGISTER_FILES_UPDATE:
4698 ret = io_sqe_files_update(ctx, arg, nr_args);
4699 break;
9b402849
JA
4700 case IORING_REGISTER_EVENTFD:
4701 ret = -EINVAL;
4702 if (nr_args != 1)
4703 break;
4704 ret = io_eventfd_register(ctx, arg);
4705 break;
4706 case IORING_UNREGISTER_EVENTFD:
4707 ret = -EINVAL;
4708 if (arg || nr_args)
4709 break;
4710 ret = io_eventfd_unregister(ctx);
4711 break;
edafccee
JA
4712 default:
4713 ret = -EINVAL;
4714 break;
4715 }
4716
4717 /* bring the ctx back to life */
206aefde 4718 reinit_completion(&ctx->completions[0]);
edafccee
JA
4719 percpu_ref_reinit(&ctx->refs);
4720 return ret;
4721}
4722
4723SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
4724 void __user *, arg, unsigned int, nr_args)
4725{
4726 struct io_ring_ctx *ctx;
4727 long ret = -EBADF;
4728 struct fd f;
4729
4730 f = fdget(fd);
4731 if (!f.file)
4732 return -EBADF;
4733
4734 ret = -EOPNOTSUPP;
4735 if (f.file->f_op != &io_uring_fops)
4736 goto out_fput;
4737
4738 ctx = f.file->private_data;
4739
4740 mutex_lock(&ctx->uring_lock);
4741 ret = __io_uring_register(ctx, opcode, arg, nr_args);
4742 mutex_unlock(&ctx->uring_lock);
c826bd7a
DD
4743 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
4744 ctx->cq_ev_fd != NULL, ret);
edafccee
JA
4745out_fput:
4746 fdput(f);
4747 return ret;
4748}
4749
2b188cc1
JA
4750static int __init io_uring_init(void)
4751{
4752 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
4753 return 0;
4754};
4755__initcall(io_uring_init);