io-wq: remove now redundant struct io_wq_nulls_list
[linux-2.6-block.git] / fs / io_uring.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
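
/*
 * Illustrative sketch of the pairing described above, as seen from a
 * userspace consumer of the CQ ring (pseudocode, not code from this
 * file; cq_head_ptr, cq_tail_ptr, cqes and cq_mask stand in for the
 * mmap'ed ring fields, and load_acquire/store_release for the
 * platform's acquire/release atomics - liburing implements the real
 * version of this loop):
 *
 *	unsigned head = *cq_head_ptr;
 *	unsigned tail = load_acquire(cq_tail_ptr);  // pairs with the
 *						    // kernel's release
 *						    // store of cq.tail
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & cq_mask];
 *		consume(cqe);	// read the entry before publishing head
 *		head++;
 *	}
 *	store_release(cq_head_ptr, head);  // pairs with the control
 *					   // dependency in io_get_cqring
 */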
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
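
/*
 * Illustrative sketch (not code from this file): the fixed file set is
 * a two-level table sized by the constants above, so looking up
 * fixed-file index 'fd' splits it on the shift and mask roughly like
 * so, where 'ctx' stands for the struct io_ring_ctx defined further
 * below:
 *
 *	struct fixed_file_table *table =
 *		&ctx->file_table[fd >> IORING_FILE_TABLE_SHIFT];
 *	struct file *file = table->files[fd & IORING_FILE_TABLE_MASK];
 */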

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are no more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};

struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct bio_vec	*bvec;
	unsigned int	nr_bvecs;
};

struct fixed_file_table {
	struct file		**files;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		bool			compat;
		bool			account_mem;
		bool			cq_overflow_flushed;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed (see the sketch after this struct).
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		atomic_t		cached_cq_overflow;
		struct io_uring_sqe	*sq_sqes;

		struct list_head	defer_list;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;

		wait_queue_head_t	inflight_wait;
	} ____cacheline_aligned_in_smp;

	struct io_rings	*rings;

	/* IO offload */
	struct io_wq		*io_wq;
	struct task_struct	*sqo_thread;	/* if using sq thread polling */
	struct mm_struct	*sqo_mm;
	wait_queue_head_t	sqo_wait;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_file_table	*file_table;
	unsigned		nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	/* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */
	struct completion	*completions;

	/* if all else fails... */
	struct io_kiocb		*fallback_req;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		unsigned		cq_mask;
		atomic_t		cq_timeouts;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;
		bool			poll_multi_file;
		/*
		 * ->poll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	poll_list;
		struct list_head	cancel_list;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;
};
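
/*
 * Illustrative sketch of the sq_array indirection noted above: to
 * submit the io_uring_sqe at slot i, the application publishes the
 * index, not the entry itself (userspace pseudocode, not code from
 * this file; sq_array, sqes, sq_tail_ptr and sq_ring_mask stand in for
 * the mmap'ed ring fields):
 *
 *	sqes[i] = ...;				// describe the operation
 *	sq_array[tail & sq_ring_mask] = i;	// point the ring slot at it
 *	store_release(sq_tail_ptr, tail + 1);	// then publish the new tail
 */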

struct sqe_submit {
	const struct io_uring_sqe	*sqe;
	struct file			*ring_file;
	int				ring_fd;
	u32				sequence;
	bool				has_user;
	bool				in_async;
	bool				needs_fixed_file;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_timeout {
	struct file			*file;
	struct hrtimer			timer;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct kiocb		rw;
		struct io_poll_iocb	poll;
		struct io_timeout	timeout;
	};

	struct sqe_submit	submit;

	struct io_ring_ctx	*ctx;
	struct list_head	list;
	struct list_head	link_list;
	unsigned int		flags;
	refcount_t		refs;
#define REQ_F_NOWAIT		1	/* must not punt to workers */
#define REQ_F_IOPOLL_COMPLETED	2	/* polled IO has completed */
#define REQ_F_FIXED_FILE	4	/* ctx owns file */
#define REQ_F_SEQ_PREV		8	/* sequential with previous */
#define REQ_F_IO_DRAIN		16	/* drain existing IO first */
#define REQ_F_IO_DRAINED	32	/* drain done */
#define REQ_F_LINK		64	/* linked sqes */
#define REQ_F_LINK_TIMEOUT	128	/* has linked timeout */
#define REQ_F_FAIL_LINK		256	/* fail rest of links */
#define REQ_F_SHADOW_DRAIN	512	/* link-drain shadow req */
#define REQ_F_TIMEOUT		1024	/* timeout request */
#define REQ_F_ISREG		2048	/* regular file */
#define REQ_F_MUST_PUNT		4096	/* must be punted even for NONBLOCK */
#define REQ_F_INFLIGHT		8192	/* on inflight list */
#define REQ_F_COMP_LOCKED	16384	/* completion under lock */
	u64			user_data;
	u32			result;
	u32			sequence;

	struct list_head	inflight_entry;

	struct io_wq_work	work;
};

#define IO_PLUG_THRESHOLD		2
#define IO_IOPOLL_BATCH			8

struct io_submit_state {
	struct blk_plug		plug;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_IOPOLL_BATCH];
	unsigned int		free_reqs;
	unsigned int		cur_req;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		has_refs;
	unsigned int		used_refs;
	unsigned int		ios_left;
};

static void io_wq_submit_work(struct io_wq_work **workptr);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void __io_free_req(struct io_kiocb *req);
static void io_put_req(struct io_kiocb *req);
static void io_double_put_req(struct io_kiocb *req);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->completions[0]);
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
	if (!ctx->fallback_req)
		goto err;

	ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL);
	if (!ctx->completions)
		goto err;

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->cq_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->completions[0]);
	init_completion(&ctx->completions[1]);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->poll_list);
	INIT_LIST_HEAD(&ctx->cancel_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	init_waitqueue_head(&ctx->inflight_wait);
	spin_lock_init(&ctx->inflight_lock);
	INIT_LIST_HEAD(&ctx->inflight_list);
	return ctx;
err:
	if (ctx->fallback_req)
		kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx->completions);
	kfree(ctx);
	return NULL;
}

static inline bool __req_need_defer(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
					+ atomic_read(&ctx->cached_cq_overflow);
}

static inline bool req_need_defer(struct io_kiocb *req)
{
	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN)
		return __req_need_defer(req);

	return false;
}

static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
	if (req && !req_need_defer(req)) {
		list_del_init(&req->list);
		return req;
	}

	return NULL;
}

static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
	if (req && !__req_need_defer(req)) {
		list_del_init(&req->list);
		return req;
	}

	return NULL;
}

static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
		/* order cqe stores with ring update */
		smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);

		if (wq_has_sleeper(&ctx->cq_wait)) {
			wake_up_interruptible(&ctx->cq_wait);
			kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
		}
	}
}

static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
{
	u8 opcode = READ_ONCE(sqe->opcode);

	return !(opcode == IORING_OP_READ_FIXED ||
		 opcode == IORING_OP_WRITE_FIXED);
}

static inline bool io_prep_async_work(struct io_kiocb *req)
{
	bool do_hashed = false;

	if (req->submit.sqe) {
		switch (req->submit.sqe->opcode) {
		case IORING_OP_WRITEV:
		case IORING_OP_WRITE_FIXED:
			do_hashed = true;
			/* fall-through */
		case IORING_OP_READV:
		case IORING_OP_READ_FIXED:
		case IORING_OP_SENDMSG:
		case IORING_OP_RECVMSG:
		case IORING_OP_ACCEPT:
		case IORING_OP_POLL_ADD:
			/*
			 * We know REQ_F_ISREG is not set on some of these
			 * opcodes, but this enables us to keep the check in
			 * just one place.
			 */
			if (!(req->flags & REQ_F_ISREG))
				req->work.flags |= IO_WQ_WORK_UNBOUND;
			break;
		}
		if (io_sqe_needs_user(req->submit.sqe))
			req->work.flags |= IO_WQ_WORK_NEEDS_USER;
	}

	return do_hashed;
}

static inline void io_queue_async_work(struct io_kiocb *req)
{
	bool do_hashed = io_prep_async_work(req);
	struct io_ring_ctx *ctx = req->ctx;

	trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
					req->flags);
	if (!do_hashed) {
		io_wq_enqueue(ctx->io_wq, &req->work);
	} else {
		io_wq_enqueue_hashed(ctx->io_wq, &req->work,
					file_inode(req->file));
	}
}

static void io_kill_timeout(struct io_kiocb *req)
{
	int ret;

	ret = hrtimer_try_to_cancel(&req->timeout.timer);
	if (ret != -1) {
		atomic_inc(&req->ctx->cq_timeouts);
		list_del_init(&req->list);
		io_cqring_fill_event(req, 0);
		io_put_req(req);
	}
}

static void io_kill_timeouts(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req, *tmp;

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
		io_kill_timeout(req);
	spin_unlock_irq(&ctx->completion_lock);
}

static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	while ((req = io_get_timeout_req(ctx)) != NULL)
		io_kill_timeout(req);

	__io_commit_cqring(ctx);

	while ((req = io_get_deferred_req(ctx)) != NULL) {
		if (req->flags & REQ_F_SHADOW_DRAIN) {
			/* Just for drain, free it. */
			__io_free_req(req);
			continue;
		}
		req->flags |= REQ_F_IO_DRAINED;
		io_queue_async_work(req);
	}
}

static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail;

	tail = ctx->cached_cq_tail;
	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
		return NULL;

	ctx->cached_cq_tail++;
	return &rings->cqes[tail & ctx->cq_mask];
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	if (waitqueue_active(&ctx->sqo_wait))
		wake_up(&ctx->sqo_wait);
	if (ctx->cq_ev_fd)
		eventfd_signal(ctx->cq_ev_fd, 1);
}

static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
	struct io_rings *rings = ctx->rings;
	struct io_uring_cqe *cqe;
	struct io_kiocb *req;
	unsigned long flags;
	LIST_HEAD(list);

	if (!force) {
		if (list_empty_careful(&ctx->cq_overflow_list))
			return;
		if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
		    rings->cq_ring_entries))
			return;
	}

	spin_lock_irqsave(&ctx->completion_lock, flags);

	/* if force is set, the ring is going away. always drop after that */
	if (force)
		ctx->cq_overflow_flushed = true;

	while (!list_empty(&ctx->cq_overflow_list)) {
		cqe = io_get_cqring(ctx);
		if (!cqe && !force)
			break;

		req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
						list);
		list_move(&req->list, &list);
		if (cqe) {
			WRITE_ONCE(cqe->user_data, req->user_data);
			WRITE_ONCE(cqe->res, req->result);
			WRITE_ONCE(cqe->flags, 0);
		} else {
			WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
		}
	}

	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);

	while (!list_empty(&list)) {
		req = list_first_entry(&list, struct io_kiocb, list);
		list_del(&req->list);
		io_put_req(req);
	}
}

static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, req->user_data, res);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (likely(cqe)) {
		WRITE_ONCE(cqe->user_data, req->user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, 0);
	} else if (ctx->cq_overflow_flushed) {
		WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
	} else {
		refcount_inc(&req->refs);
		req->result = res;
		list_add_tail(&req->list, &ctx->cq_overflow_list);
	}
}

static void io_cqring_add_event(struct io_kiocb *req, long res)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	io_cqring_fill_event(req, res);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
}

static inline bool io_is_fallback_req(struct io_kiocb *req)
{
	return req == (struct io_kiocb *)
		((unsigned long) req->ctx->fallback_req & ~1UL);
}

static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = ctx->fallback_req;
	if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
		return req;

	return NULL;
}

static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
				   struct io_submit_state *state)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct io_kiocb *req;

	if (!percpu_ref_tryget(&ctx->refs))
		return NULL;

	if (!state) {
		req = kmem_cache_alloc(req_cachep, gfp);
		if (unlikely(!req))
			goto fallback;
	} else if (!state->free_reqs) {
		size_t sz;
		int ret;

		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);

		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		if (unlikely(ret <= 0)) {
			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!state->reqs[0])
				goto fallback;
			ret = 1;
		}
		state->free_reqs = ret - 1;
		state->cur_req = 1;
		req = state->reqs[0];
	} else {
		req = state->reqs[state->cur_req];
		state->free_reqs--;
		state->cur_req++;
	}

got_it:
	req->file = NULL;
	req->ctx = ctx;
	req->flags = 0;
	/* one is dropped after submission, the other at completion */
	refcount_set(&req->refs, 2);
	req->result = 0;
	INIT_IO_WORK(&req->work, io_wq_submit_work);
	return req;
fallback:
	req = io_get_fallback_req(ctx);
	if (req)
		goto got_it;
	percpu_ref_put(&ctx->refs);
	return NULL;
}

static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
{
	if (*nr) {
		kmem_cache_free_bulk(req_cachep, *nr, reqs);
		percpu_ref_put_many(&ctx->refs, *nr);
		*nr = 0;
	}
}

static void __io_free_req(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
		fput(req->file);
	if (req->flags & REQ_F_INFLIGHT) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		list_del(&req->inflight_entry);
		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
	}
	percpu_ref_put(&ctx->refs);
	if (likely(!io_is_fallback_req(req)))
		kmem_cache_free(req_cachep, req);
	else
		clear_bit_unlock(0, (unsigned long *) ctx->fallback_req);
}

static bool io_link_cancel_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	ret = hrtimer_try_to_cancel(&req->timeout.timer);
	if (ret != -1) {
		io_cqring_fill_event(req, -ECANCELED);
		io_commit_cqring(ctx);
		req->flags &= ~REQ_F_LINK;
		io_put_req(req);
		return true;
	}

	return false;
}

static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *nxt;
	bool wake_ev = false;

	/*
	 * The list should never be empty when we are called here. But it
	 * could potentially happen if the chain is messed up, so check to
	 * be on the safe side.
	 */
	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
	while (nxt) {
		list_del_init(&nxt->list);
		if (!list_empty(&req->link_list)) {
			INIT_LIST_HEAD(&nxt->link_list);
			list_splice(&req->link_list, &nxt->link_list);
			nxt->flags |= REQ_F_LINK;
		}

		/*
		 * If we're in async work, we can continue processing the chain
		 * in this context instead of having to queue up new async work.
		 */
		if (req->flags & REQ_F_LINK_TIMEOUT) {
			wake_ev = io_link_cancel_timeout(nxt);

			/* we dropped this link, get next */
			nxt = list_first_entry_or_null(&req->link_list,
							struct io_kiocb, list);
		} else if (nxtptr && io_wq_current_is_worker()) {
			*nxtptr = nxt;
			break;
		} else {
			io_queue_async_work(nxt);
			break;
		}
	}

	if (wake_ev)
		io_cqring_ev_posted(ctx);
}

/*
 * Called if REQ_F_LINK is set, and we fail the head request
 */
static void io_fail_links(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);

	while (!list_empty(&req->link_list)) {
		link = list_first_entry(&req->link_list, struct io_kiocb, list);
		list_del_init(&link->list);

		trace_io_uring_fail_link(req, link);

		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
		    link->submit.sqe->opcode == IORING_OP_LINK_TIMEOUT) {
			io_link_cancel_timeout(link);
		} else {
			io_cqring_fill_event(link, -ECANCELED);
			io_double_put_req(link);
		}
	}

	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);
}

static void io_free_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
{
	if (likely(!(req->flags & REQ_F_LINK))) {
		__io_free_req(req);
		return;
	}

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (req->flags & REQ_F_FAIL_LINK) {
		io_fail_links(req);
	} else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
			REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		/*
		 * If this is a timeout link, we could be racing with the
		 * timeout timer. Grab the completion lock for this case to
		 * protect against that.
		 */
		spin_lock_irqsave(&ctx->completion_lock, flags);
		io_req_link_next(req, nxt);
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
	} else {
		io_req_link_next(req, nxt);
	}

	__io_free_req(req);
}

static void io_free_req(struct io_kiocb *req)
{
	io_free_req_find_next(req, NULL);
}

/*
 * Drop reference to request, return next in chain (if there is one) if this
 * was the last reference to this request.
 */
static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	struct io_kiocb *nxt = NULL;

	if (refcount_dec_and_test(&req->refs))
		io_free_req_find_next(req, &nxt);

	if (nxt) {
		if (nxtptr)
			*nxtptr = nxt;
		else
			io_queue_async_work(nxt);
	}
}

static void io_put_req(struct io_kiocb *req)
{
	if (refcount_dec_and_test(&req->refs))
		io_free_req(req);
}

static void io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		__io_free_req(req);
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
{
	struct io_rings *rings = ctx->rings;

	/*
	 * noflush == true is from the waitqueue handler, just ensure we wake
	 * up the task, and the next invocation will flush the entries. We
	 * cannot safely do it from here.
	 */
	if (noflush && !list_empty(&ctx->cq_overflow_list))
		return -1U;

	io_cqring_overflow_flush(ctx, false);

	/* See comment at the top of this file */
	smp_rmb();
	return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

/*
 * Find and free completed poll iocbs
 */
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       struct list_head *done)
{
	void *reqs[IO_IOPOLL_BATCH];
	struct io_kiocb *req;
	int to_free;

	to_free = 0;
	while (!list_empty(done)) {
		req = list_first_entry(done, struct io_kiocb, list);
		list_del(&req->list);

		io_cqring_fill_event(req, req->result);
		(*nr_events)++;

		if (refcount_dec_and_test(&req->refs)) {
			/* If we're not using fixed files, we have to pair the
			 * completion part with the file put. Use regular
			 * completions for those, only batch free for fixed
			 * file and non-linked commands.
			 */
			if (((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
			    REQ_F_FIXED_FILE) && !io_is_fallback_req(req)) {
				reqs[to_free++] = req;
				if (to_free == ARRAY_SIZE(reqs))
					io_free_req_many(ctx, reqs, &to_free);
			} else {
				io_free_req(req);
			}
		}
	}

	io_commit_cqring(ctx);
	io_free_req_many(ctx, reqs, &to_free);
}

static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
			long min)
{
	struct io_kiocb *req, *tmp;
	LIST_HEAD(done);
	bool spin;
	int ret;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list, and we're under the requested amount.
	 */
	spin = !ctx->poll_multi_file && *nr_events < min;

	ret = 0;
	list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
		struct kiocb *kiocb = &req->rw;

		/*
		 * Move completed entries to our local list. If we find a
		 * request that requires polling, break out and complete
		 * the done list first, if we have entries there.
		 */
		if (req->flags & REQ_F_IOPOLL_COMPLETED) {
			list_move_tail(&req->list, &done);
			continue;
		}
		if (!list_empty(&done))
			break;

		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
		if (ret < 0)
			break;

		if (ret && spin)
			spin = false;
		ret = 0;
	}

	if (!list_empty(&done))
		io_iopoll_complete(ctx, nr_events, &done);

	return ret;
}

/*
 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
 * non-spinning poll check - we'll still enter the driver poll loop, but only
 * as a non-spinning completion check.
 */
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
				long min)
{
	while (!list_empty(&ctx->poll_list) && !need_resched()) {
		int ret;

		ret = io_do_iopoll(ctx, nr_events, min);
		if (ret < 0)
			return ret;
		if (!min || *nr_events >= min)
			return 0;
	}

	return 1;
}

/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_IOPOLL))
		return;

	mutex_lock(&ctx->uring_lock);
	while (!list_empty(&ctx->poll_list)) {
		unsigned int nr_events = 0;

		io_iopoll_getevents(ctx, &nr_events, 1);

		/*
		 * Ensure we allow local-to-the-cpu processing to take place,
		 * in this case we need to ensure that we reap all events.
		 */
		cond_resched();
	}
	mutex_unlock(&ctx->uring_lock);
}

static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
			     long min)
{
	int iters = 0, ret = 0;

	do {
		int tmin = 0;

		/*
		 * Don't enter poll loop if we already have events pending.
		 * If we do, we can potentially be spinning for commands that
		 * already triggered a CQE (eg in error).
		 */
		if (io_cqring_events(ctx, false))
			break;

		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (!(++iters & 7)) {
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&ctx->uring_lock);
		}

		if (*nr_events < min)
			tmin = min - *nr_events;

		ret = io_iopoll_getevents(ctx, nr_events, tmin);
		if (ret <= 0)
			break;
		ret = 0;
	} while (min && !*nr_events && !need_resched());

	return ret;
}

static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
			   long min)
{
	int ret;

	/*
	 * We disallow the app entering submit/complete with polling, but we
	 * still need to lock the ring to prevent racing with polled issue
	 * that got punted to a workqueue.
	 */
	mutex_lock(&ctx->uring_lock);
	ret = __io_iopoll_check(ctx, nr_events, min);
	mutex_unlock(&ctx->uring_lock);
	return ret;
}

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct inode *inode = file_inode(req->file);

		__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
	}
	file_end_write(req->file);
}

static void io_complete_rw_common(struct kiocb *kiocb, long res)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if ((req->flags & REQ_F_LINK) && res != req->result)
		req->flags |= REQ_F_FAIL_LINK;
	io_cqring_add_event(req, res);
}

static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	io_complete_rw_common(kiocb, res);
	io_put_req(req);
}

static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
	struct io_kiocb *nxt = NULL;

	io_complete_rw_common(kiocb, res);
	io_put_req_find_next(req, &nxt);

	return nxt;
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if ((req->flags & REQ_F_LINK) && res != req->result)
		req->flags |= REQ_F_FAIL_LINK;
	req->result = res;
	if (res != -EAGAIN)
		req->flags |= REQ_F_IOPOLL_COMPLETED;
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from an io_iopoll_getevents() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (list_empty(&ctx->poll_list)) {
		ctx->poll_multi_file = false;
	} else if (!ctx->poll_multi_file) {
		struct io_kiocb *list_req;

		list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
						list);
		if (list_req->rw.ki_filp != req->rw.ki_filp)
			ctx->poll_multi_file = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (req->flags & REQ_F_IOPOLL_COMPLETED)
		list_add(&req->list, &ctx->poll_list);
	else
		list_add_tail(&req->list, &ctx->poll_list);
}

static void io_file_put(struct io_submit_state *state)
{
	if (state->file) {
		int diff = state->has_refs - state->used_refs;

		if (diff)
			fput_many(state->file, diff);
		state->file = NULL;
	}
}

/*
 * Get as many references to a file as we have IOs left in this submission,
 * assuming most submissions are for one file, or at least that each file
 * has more than one submission.
 */
static struct file *io_file_get(struct io_submit_state *state, int fd)
{
	if (!state)
		return fget(fd);

	if (state->file) {
		if (state->fd == fd) {
			state->used_refs++;
			state->ios_left--;
			return state->file;
		}
		io_file_put(state);
	}
	state->file = fget_many(fd, state->ios_left);
	if (!state->file)
		return NULL;

	state->fd = fd;
	state->has_refs = state->ios_left;
	state->used_refs = 1;
	state->ios_left--;
	return state->file;
}
2b188cc1
JA
1342/*
1343 * If we tracked the file through the SCM inflight mechanism, we could support
1344 * any file. For now, just ensure that anything potentially problematic is done
1345 * inline.
1346 */
1347static bool io_file_supports_async(struct file *file)
1348{
1349 umode_t mode = file_inode(file)->i_mode;
1350
1351 if (S_ISBLK(mode) || S_ISCHR(mode))
1352 return true;
1353 if (S_ISREG(mode) && file->f_op != &io_uring_fops)
1354 return true;
1355
1356 return false;
1357}

static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
{
	const struct io_uring_sqe *sqe = req->submit.sqe;
	struct io_ring_ctx *ctx = req->ctx;
	struct kiocb *kiocb = &req->rw;
	unsigned ioprio;
	int ret;

	if (!req->file)
		return -EBADF;

	if (S_ISREG(file_inode(req->file)->i_mode))
		req->flags |= REQ_F_ISREG;

	/*
	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
	 * we know to async punt it even if it was opened O_NONBLOCK
	 */
	if (force_nonblock && !io_file_supports_async(req->file)) {
		req->flags |= REQ_F_MUST_PUNT;
		return -EAGAIN;
	}

	kiocb->ki_pos = READ_ONCE(sqe->off);
	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		kiocb->ki_ioprio = ioprio;
	} else
		kiocb->ki_ioprio = get_current_ioprio();

	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
	if (unlikely(ret))
		return ret;

	/* don't allow async punt if RWF_NOWAIT was requested */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    (req->file->f_flags & O_NONBLOCK))
		req->flags |= REQ_F_NOWAIT;

	if (force_nonblock)
		kiocb->ki_flags |= IOCB_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
		    !kiocb->ki_filp->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}
	return 0;
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		/* fall through */
	default:
		kiocb->ki_complete(kiocb, ret, 0);
	}
}

static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
		       bool in_async)
{
	if (in_async && ret >= 0 && nxt && kiocb->ki_complete == io_complete_rw)
		*nxt = __io_complete_rw(kiocb, ret);
	else
		io_rw_done(kiocb, ret);
}

static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
			   const struct io_uring_sqe *sqe,
			   struct iov_iter *iter)
{
	size_t len = READ_ONCE(sqe->len);
	struct io_mapped_ubuf *imu;
	unsigned index, buf_index;
	size_t offset;
	u64 buf_addr;

	/* attempt to use fixed buffers without having provided iovecs */
	if (unlikely(!ctx->user_bufs))
		return -EFAULT;

	buf_index = READ_ONCE(sqe->buf_index);
	if (unlikely(buf_index >= ctx->nr_user_bufs))
		return -EFAULT;

	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
	imu = &ctx->user_bufs[index];
	buf_addr = READ_ONCE(sqe->addr);

	/* overflow */
	if (buf_addr + len < buf_addr)
		return -EFAULT;
	/* not inside the mapped region */
	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
		return -EFAULT;

	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}
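
/*
 * Worked example (illustrative numbers only) for the bvec skip in
 * io_import_fixed() above: with PAGE_SIZE == 4096, a first bvec of
 * bv_len == 4096 and offset == 9000, we get offset -= 4096 -> 4904,
 * seg_skip = 1 + (4904 >> PAGE_SHIFT) = 2, so the iterator starts at
 * imu->bvec[2] with iov_offset = 4904 & ~PAGE_MASK = 808; i.e.
 * 9000 = 4096 + 4096 + 808, as expected.
 */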

static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
			       const struct sqe_submit *s, struct iovec **iovec,
			       struct iov_iter *iter)
{
	const struct io_uring_sqe *sqe = s->sqe;
	void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	size_t sqe_len = READ_ONCE(sqe->len);
	u8 opcode;

	/*
	 * We're reading ->opcode for the second time, but the first read
	 * doesn't care whether it's _FIXED or not, so it doesn't matter
	 * whether ->opcode changes concurrently. The first read does care
	 * about whether it is a READ or a WRITE, so we don't trust this read
	 * for that purpose and instead let the caller pass in the read/write
	 * flag.
	 */
	opcode = READ_ONCE(sqe->opcode);
	if (opcode == IORING_OP_READ_FIXED ||
	    opcode == IORING_OP_WRITE_FIXED) {
		ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
		*iovec = NULL;
		return ret;
	}

	if (!s->has_user)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (ctx->compat)
		return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
						iovec, iter);
#endif

	return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
			   struct iov_iter *iter)
{
	ssize_t ret = 0;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if (kiocb->ki_flags & IOCB_NOWAIT)
		return -EAGAIN;

	while (iov_iter_count(iter)) {
		struct iovec iovec = iov_iter_iovec(iter);
		ssize_t nr;

		if (rw == READ) {
			nr = file->f_op->read(file, iovec.iov_base,
					      iovec.iov_len, &kiocb->ki_pos);
		} else {
			nr = file->f_op->write(file, iovec.iov_base,
					       iovec.iov_len, &kiocb->ki_pos);
		}

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != iovec.iov_len)
			break;
		iov_iter_advance(iter, nr);
	}

	return ret;
}

static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
		   bool force_nonblock)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw;
	struct iov_iter iter;
	struct file *file;
	size_t iov_count;
	ssize_t read_size, ret;

	ret = io_prep_rw(req, force_nonblock);
	if (ret)
		return ret;
	file = kiocb->ki_filp;

	if (unlikely(!(file->f_mode & FMODE_READ)))
		return -EBADF;

	ret = io_import_iovec(req->ctx, READ, &req->submit, &iovec, &iter);
	if (ret < 0)
		return ret;

	read_size = ret;
	if (req->flags & REQ_F_LINK)
		req->result = read_size;

	iov_count = iov_iter_count(&iter);
	ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
	if (!ret) {
		ssize_t ret2;

		if (file->f_op->read_iter)
			ret2 = call_read_iter(file, kiocb, &iter);
		else
			ret2 = loop_rw_iter(READ, file, kiocb, &iter);

		/*
		 * In case of a short read, punt to async. This can happen
		 * if we have data partially cached. Alternatively we can
		 * return the short read, in which case the application will
		 * need to issue another SQE and wait for it. That SQE will
		 * need async punt anyway, so it's more efficient to do it
		 * here.
		 */
		if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
		    (req->flags & REQ_F_ISREG) &&
		    ret2 > 0 && ret2 < read_size)
			ret2 = -EAGAIN;
		/* Catch -EAGAIN return for forced non-blocking submission */
		if (!force_nonblock || ret2 != -EAGAIN)
			kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
		else
			ret = -EAGAIN;
	}
	kfree(iovec);
	return ret;
}

static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
		    bool force_nonblock)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw;
	struct iov_iter iter;
	struct file *file;
	size_t iov_count;
	ssize_t ret;

	ret = io_prep_rw(req, force_nonblock);
	if (ret)
		return ret;

	file = kiocb->ki_filp;
	if (unlikely(!(file->f_mode & FMODE_WRITE)))
		return -EBADF;

	ret = io_import_iovec(req->ctx, WRITE, &req->submit, &iovec, &iter);
	if (ret < 0)
		return ret;

	if (req->flags & REQ_F_LINK)
		req->result = ret;

	iov_count = iov_iter_count(&iter);

	ret = -EAGAIN;
	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT))
		goto out_free;

	ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
	if (!ret) {
		ssize_t ret2;

		/*
		 * Open-code file_start_write here to grab freeze protection,
		 * which will be released by another thread in
		 * io_complete_rw(). Fool lockdep by telling it the lock got
		 * released so that it doesn't complain about the held lock when
		 * we return to userspace.
		 */
		if (req->flags & REQ_F_ISREG) {
			__sb_start_write(file_inode(file)->i_sb,
						SB_FREEZE_WRITE, true);
			__sb_writers_release(file_inode(file)->i_sb,
						SB_FREEZE_WRITE);
		}
		kiocb->ki_flags |= IOCB_WRITE;

		if (file->f_op->write_iter)
			ret2 = call_write_iter(file, kiocb, &iter);
		else
			ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
		if (!force_nonblock || ret2 != -EAGAIN)
			kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
		else
			ret = -EAGAIN;
	}
out_free:
	kfree(iovec);
	return ret;
}

/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	io_cqring_add_event(req, 0);
	io_put_req(req);
	return 0;
}

static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	return 0;
}

static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		    struct io_kiocb **nxt, bool force_nonblock)
{
	loff_t sqe_off = READ_ONCE(sqe->off);
	loff_t sqe_len = READ_ONCE(sqe->len);
	loff_t end = sqe_off + sqe_len;
	unsigned fsync_flags;
	int ret;

	fsync_flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	ret = io_prep_fsync(req, sqe);
	if (ret)
		return ret;

	/* fsync always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;

	ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
				end > 0 ? end : LLONG_MAX,
				fsync_flags & IORING_FSYNC_DATASYNC);

	if (ret < 0 && (req->flags & REQ_F_LINK))
		req->flags |= REQ_F_FAIL_LINK;
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
}

static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret = 0;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	return ret;
}

static int io_sync_file_range(struct io_kiocb *req,
			      const struct io_uring_sqe *sqe,
			      struct io_kiocb **nxt,
			      bool force_nonblock)
{
	loff_t sqe_off;
	loff_t sqe_len;
	unsigned flags;
	int ret;

	ret = io_prep_sfr(req, sqe);
	if (ret)
		return ret;

	/* sync_file_range always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;

	sqe_off = READ_ONCE(sqe->off);
	sqe_len = READ_ONCE(sqe->len);
	flags = READ_ONCE(sqe->sync_range_flags);

	ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);

	if (ret < 0 && (req->flags & REQ_F_LINK))
		req->flags |= REQ_F_FAIL_LINK;
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
}
1839
0fa03c62 1840#if defined(CONFIG_NET)
aa1fa28f 1841static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
ba816ad6 1842 struct io_kiocb **nxt, bool force_nonblock,
aa1fa28f
JA
1843 long (*fn)(struct socket *, struct user_msghdr __user *,
1844 unsigned int))
1845{
0fa03c62
JA
1846 struct socket *sock;
1847 int ret;
1848
1849 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1850 return -EINVAL;
1851
1852 sock = sock_from_file(req->file, &ret);
1853 if (sock) {
1854 struct user_msghdr __user *msg;
1855 unsigned flags;
1856
1857 flags = READ_ONCE(sqe->msg_flags);
1858 if (flags & MSG_DONTWAIT)
1859 req->flags |= REQ_F_NOWAIT;
1860 else if (force_nonblock)
1861 flags |= MSG_DONTWAIT;
1862
1863 msg = (struct user_msghdr __user *) (unsigned long)
1864 READ_ONCE(sqe->addr);
1865
aa1fa28f 1866 ret = fn(sock, msg, flags);
0fa03c62
JA
1867 if (force_nonblock && ret == -EAGAIN)
1868 return ret;
1869 }
1870
78e19bbe 1871 io_cqring_add_event(req, ret);
f1f40853
JA
1872 if (ret < 0 && (req->flags & REQ_F_LINK))
1873 req->flags |= REQ_F_FAIL_LINK;
ec9c02ad 1874 io_put_req_find_next(req, nxt);
5d17b4a4
JA
1875 return 0;
1876}
aa1fa28f
JA
1877#endif
1878
1879static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
ba816ad6 1880 struct io_kiocb **nxt, bool force_nonblock)
aa1fa28f
JA
1881{
1882#if defined(CONFIG_NET)
ba816ad6
JA
1883 return io_send_recvmsg(req, sqe, nxt, force_nonblock,
1884 __sys_sendmsg_sock);
aa1fa28f
JA
1885#else
1886 return -EOPNOTSUPP;
1887#endif
1888}
1889
1890static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
ba816ad6 1891 struct io_kiocb **nxt, bool force_nonblock)
aa1fa28f
JA
1892{
1893#if defined(CONFIG_NET)
ba816ad6
JA
1894 return io_send_recvmsg(req, sqe, nxt, force_nonblock,
1895 __sys_recvmsg_sock);
0fa03c62
JA
1896#else
1897 return -EOPNOTSUPP;
1898#endif
1899}
5d17b4a4 1900
17f2fe35
JA
1901static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1902 struct io_kiocb **nxt, bool force_nonblock)
1903{
1904#if defined(CONFIG_NET)
1905 struct sockaddr __user *addr;
1906 int __user *addr_len;
1907 unsigned file_flags;
1908 int flags, ret;
1909
1910 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
1911 return -EINVAL;
1912 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1913 return -EINVAL;
1914
1915 addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
1916 addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2);
1917 flags = READ_ONCE(sqe->accept_flags);
1918 file_flags = force_nonblock ? O_NONBLOCK : 0;
1919
1920 ret = __sys_accept4_file(req->file, file_flags, addr, addr_len, flags);
1921 if (ret == -EAGAIN && force_nonblock) {
1922 req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
1923 return -EAGAIN;
1924 }
8e3cca12
JA
1925 if (ret == -ERESTARTSYS)
1926 ret = -EINTR;
17f2fe35
JA
1927 if (ret < 0 && (req->flags & REQ_F_LINK))
1928 req->flags |= REQ_F_FAIL_LINK;
78e19bbe 1929 io_cqring_add_event(req, ret);
ec9c02ad 1930 io_put_req_find_next(req, nxt);
17f2fe35
JA
1931 return 0;
1932#else
1933 return -EOPNOTSUPP;
1934#endif
1935}
1936
221c5eb2
JA
1937static void io_poll_remove_one(struct io_kiocb *req)
1938{
1939 struct io_poll_iocb *poll = &req->poll;
1940
1941 spin_lock(&poll->head->lock);
1942 WRITE_ONCE(poll->canceled, true);
1943 if (!list_empty(&poll->wait.entry)) {
1944 list_del_init(&poll->wait.entry);
a197f664 1945 io_queue_async_work(req);
221c5eb2
JA
1946 }
1947 spin_unlock(&poll->head->lock);
1948
1949 list_del_init(&req->list);
1950}
1951
1952static void io_poll_remove_all(struct io_ring_ctx *ctx)
1953{
1954 struct io_kiocb *req;
1955
1956 spin_lock_irq(&ctx->completion_lock);
1957 while (!list_empty(&ctx->cancel_list)) {
 1958 		req = list_first_entry(&ctx->cancel_list, struct io_kiocb, list);
1959 io_poll_remove_one(req);
1960 }
1961 spin_unlock_irq(&ctx->completion_lock);
1962}
1963
47f46768
JA
1964static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
1965{
1966 struct io_kiocb *req;
1967
1968 list_for_each_entry(req, &ctx->cancel_list, list) {
1969 if (req->user_data != sqe_addr)
1970 continue;
1971 io_poll_remove_one(req);
1972 return 0;
1973 }
1974
1975 return -ENOENT;
1976}
1977
221c5eb2
JA
1978/*
1979 * Find a running poll command that matches one specified in sqe->addr,
1980 * and remove it if found.
1981 */
1982static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1983{
1984 struct io_ring_ctx *ctx = req->ctx;
47f46768 1985 int ret;
221c5eb2
JA
1986
1987 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1988 return -EINVAL;
1989 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
1990 sqe->poll_events)
1991 return -EINVAL;
1992
1993 spin_lock_irq(&ctx->completion_lock);
47f46768 1994 ret = io_poll_cancel(ctx, READ_ONCE(sqe->addr));
221c5eb2
JA
1995 spin_unlock_irq(&ctx->completion_lock);
1996
78e19bbe 1997 io_cqring_add_event(req, ret);
f1f40853
JA
1998 if (ret < 0 && (req->flags & REQ_F_LINK))
1999 req->flags |= REQ_F_FAIL_LINK;
ec9c02ad 2000 io_put_req(req);
221c5eb2
JA
2001 return 0;
2002}
2003
a197f664 2004static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
221c5eb2 2005{
a197f664
JL
2006 struct io_ring_ctx *ctx = req->ctx;
2007
8c838788 2008 req->poll.done = true;
78e19bbe 2009 io_cqring_fill_event(req, mangle_poll(mask));
8c838788 2010 io_commit_cqring(ctx);
221c5eb2
JA
2011}
2012
561fb04a 2013static void io_poll_complete_work(struct io_wq_work **workptr)
221c5eb2 2014{
561fb04a 2015 struct io_wq_work *work = *workptr;
221c5eb2
JA
2016 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2017 struct io_poll_iocb *poll = &req->poll;
2018 struct poll_table_struct pt = { ._key = poll->events };
2019 struct io_ring_ctx *ctx = req->ctx;
89723d0b 2020 struct io_kiocb *nxt = NULL;
221c5eb2
JA
2021 __poll_t mask = 0;
2022
561fb04a
JA
2023 if (work->flags & IO_WQ_WORK_CANCEL)
2024 WRITE_ONCE(poll->canceled, true);
2025
221c5eb2
JA
2026 if (!READ_ONCE(poll->canceled))
2027 mask = vfs_poll(poll->file, &pt) & poll->events;
2028
2029 /*
2030 * Note that ->ki_cancel callers also delete iocb from active_reqs after
2031 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
2032 * synchronize with them. In the cancellation case the list_del_init
2033 * itself is not actually needed, but harmless so we keep it in to
2034 * avoid further branches in the fast path.
2035 */
2036 spin_lock_irq(&ctx->completion_lock);
2037 if (!mask && !READ_ONCE(poll->canceled)) {
2038 add_wait_queue(poll->head, &poll->wait);
2039 spin_unlock_irq(&ctx->completion_lock);
2040 return;
2041 }
2042 list_del_init(&req->list);
a197f664 2043 io_poll_complete(req, mask);
221c5eb2
JA
2044 spin_unlock_irq(&ctx->completion_lock);
2045
8c838788 2046 io_cqring_ev_posted(ctx);
89723d0b 2047
ec9c02ad 2048 io_put_req_find_next(req, &nxt);
89723d0b
JA
2049 if (nxt)
2050 *workptr = &nxt->work;
221c5eb2
JA
2051}
2052
2053static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
2054 void *key)
2055{
2056 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
2057 wait);
2058 struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
2059 struct io_ring_ctx *ctx = req->ctx;
2060 __poll_t mask = key_to_poll(key);
8c838788 2061 unsigned long flags;
221c5eb2
JA
2062
2063 /* for instances that support it check for an event match first: */
8c838788
JA
2064 if (mask && !(mask & poll->events))
2065 return 0;
221c5eb2 2066
8c838788 2067 list_del_init(&poll->wait.entry);
221c5eb2 2068
7c9e7f0f
JA
2069 /*
2070 * Run completion inline if we can. We're using trylock here because
2071 * we are violating the completion_lock -> poll wq lock ordering.
2072 * If we have a link timeout we're going to need the completion_lock
2073 * for finalizing the request, mark us as having grabbed that already.
2074 */
8c838788
JA
2075 if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
2076 list_del(&req->list);
a197f664 2077 io_poll_complete(req, mask);
7c9e7f0f
JA
2078 req->flags |= REQ_F_COMP_LOCKED;
2079 io_put_req(req);
8c838788 2080 spin_unlock_irqrestore(&ctx->completion_lock, flags);
221c5eb2 2081
8c838788 2082 io_cqring_ev_posted(ctx);
8c838788 2083 } else {
a197f664 2084 io_queue_async_work(req);
221c5eb2
JA
2085 }
2086
221c5eb2
JA
2087 return 1;
2088}
2089
2090struct io_poll_table {
2091 struct poll_table_struct pt;
2092 struct io_kiocb *req;
2093 int error;
2094};
2095
2096static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
2097 struct poll_table_struct *p)
2098{
2099 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
2100
2101 if (unlikely(pt->req->poll.head)) {
2102 pt->error = -EINVAL;
2103 return;
2104 }
2105
2106 pt->error = 0;
2107 pt->req->poll.head = head;
2108 add_wait_queue(head, &pt->req->poll.wait);
2109}
2110
89723d0b
JA
2111static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2112 struct io_kiocb **nxt)
221c5eb2
JA
2113{
2114 struct io_poll_iocb *poll = &req->poll;
2115 struct io_ring_ctx *ctx = req->ctx;
2116 struct io_poll_table ipt;
8c838788 2117 bool cancel = false;
221c5eb2
JA
2118 __poll_t mask;
2119 u16 events;
221c5eb2
JA
2120
2121 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
2122 return -EINVAL;
2123 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
2124 return -EINVAL;
09bb8394
JA
2125 if (!poll->file)
2126 return -EBADF;
221c5eb2 2127
6cc47d1d 2128 req->submit.sqe = NULL;
561fb04a 2129 INIT_IO_WORK(&req->work, io_poll_complete_work);
221c5eb2
JA
2130 events = READ_ONCE(sqe->poll_events);
2131 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
2132
221c5eb2 2133 poll->head = NULL;
8c838788 2134 poll->done = false;
221c5eb2
JA
2135 poll->canceled = false;
2136
2137 ipt.pt._qproc = io_poll_queue_proc;
2138 ipt.pt._key = poll->events;
2139 ipt.req = req;
2140 ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
2141
 2142 	/* initialize the list so that we can do list_empty checks */
2143 INIT_LIST_HEAD(&poll->wait.entry);
2144 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
2145
36703247
JA
2146 INIT_LIST_HEAD(&req->list);
2147
221c5eb2 2148 mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
221c5eb2
JA
2149
2150 spin_lock_irq(&ctx->completion_lock);
8c838788
JA
2151 if (likely(poll->head)) {
2152 spin_lock(&poll->head->lock);
2153 if (unlikely(list_empty(&poll->wait.entry))) {
2154 if (ipt.error)
2155 cancel = true;
2156 ipt.error = 0;
2157 mask = 0;
2158 }
2159 if (mask || ipt.error)
2160 list_del_init(&poll->wait.entry);
2161 else if (cancel)
2162 WRITE_ONCE(poll->canceled, true);
2163 else if (!poll->done) /* actually waiting for an event */
2164 list_add_tail(&req->list, &ctx->cancel_list);
2165 spin_unlock(&poll->head->lock);
2166 }
2167 if (mask) { /* no async, we'd stolen it */
221c5eb2 2168 ipt.error = 0;
a197f664 2169 io_poll_complete(req, mask);
221c5eb2 2170 }
221c5eb2
JA
2171 spin_unlock_irq(&ctx->completion_lock);
2172
8c838788
JA
2173 if (mask) {
2174 io_cqring_ev_posted(ctx);
ec9c02ad 2175 io_put_req_find_next(req, nxt);
221c5eb2 2176 }
8c838788 2177 return ipt.error;
221c5eb2
JA
2178}
2179
5262f567
JA
2180static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
2181{
2182 struct io_ring_ctx *ctx;
11365043 2183 struct io_kiocb *req;
5262f567
JA
2184 unsigned long flags;
2185
2186 req = container_of(timer, struct io_kiocb, timeout.timer);
2187 ctx = req->ctx;
2188 atomic_inc(&ctx->cq_timeouts);
2189
2190 spin_lock_irqsave(&ctx->completion_lock, flags);
ef03681a 2191 /*
11365043
JA
2192 * We could be racing with timeout deletion. If the list is empty,
2193 * then timeout lookup already found it and will be handling it.
ef03681a 2194 */
842f9612 2195 if (!list_empty(&req->list)) {
11365043 2196 struct io_kiocb *prev;
5262f567 2197
11365043
JA
2198 /*
 2199 		 * Adjust the sequence of reqs before the current one, because
 2200 		 * this req will consume a slot in the cq_ring and the cq_tail
 2201 		 * pointer will be advanced; otherwise other timeout reqs may
 2202 		 * fire early, without waiting for enough wait_nr completions.
2203 */
2204 prev = req;
2205 list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
2206 prev->sequence++;
11365043 2207 list_del_init(&req->list);
11365043 2208 }
842f9612 2209
78e19bbe 2210 io_cqring_fill_event(req, -ETIME);
842f9612 2211 io_commit_cqring(ctx);
5262f567
JA
2212 spin_unlock_irqrestore(&ctx->completion_lock, flags);
2213
842f9612 2214 io_cqring_ev_posted(ctx);
f1f40853
JA
2215 if (req->flags & REQ_F_LINK)
2216 req->flags |= REQ_F_FAIL_LINK;
ec9c02ad 2217 io_put_req(req);
11365043
JA
2218 return HRTIMER_NORESTART;
2219}
2220
47f46768
JA
2221static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
2222{
2223 struct io_kiocb *req;
2224 int ret = -ENOENT;
2225
2226 list_for_each_entry(req, &ctx->timeout_list, list) {
2227 if (user_data == req->user_data) {
2228 list_del_init(&req->list);
2229 ret = 0;
2230 break;
2231 }
2232 }
2233
2234 if (ret == -ENOENT)
2235 return ret;
2236
2237 ret = hrtimer_try_to_cancel(&req->timeout.timer);
2238 if (ret == -1)
2239 return -EALREADY;
2240
2241 io_cqring_fill_event(req, -ECANCELED);
2242 io_put_req(req);
2243 return 0;
2244}
2245
11365043
JA
2246/*
2247 * Remove or update an existing timeout command
2248 */
2249static int io_timeout_remove(struct io_kiocb *req,
2250 const struct io_uring_sqe *sqe)
2251{
2252 struct io_ring_ctx *ctx = req->ctx;
11365043 2253 unsigned flags;
47f46768 2254 int ret;
11365043
JA
2255
2256 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2257 return -EINVAL;
2258 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
2259 return -EINVAL;
2260 flags = READ_ONCE(sqe->timeout_flags);
2261 if (flags)
2262 return -EINVAL;
2263
11365043 2264 spin_lock_irq(&ctx->completion_lock);
47f46768 2265 ret = io_timeout_cancel(ctx, READ_ONCE(sqe->addr));
11365043 2266
47f46768 2267 io_cqring_fill_event(req, ret);
11365043
JA
2268 io_commit_cqring(ctx);
2269 spin_unlock_irq(&ctx->completion_lock);
5262f567 2270 io_cqring_ev_posted(ctx);
47f46768
JA
2271 if (ret < 0 && req->flags & REQ_F_LINK)
2272 req->flags |= REQ_F_FAIL_LINK;
ec9c02ad 2273 io_put_req(req);
11365043 2274 return 0;
5262f567
JA
2275}
2276
2277static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2278{
5da0fb1a 2279 unsigned count;
5262f567
JA
2280 struct io_ring_ctx *ctx = req->ctx;
2281 struct list_head *entry;
a41525ab 2282 enum hrtimer_mode mode;
bdf20073 2283 struct timespec64 ts;
a1f58ba4 2284 unsigned span = 0;
a41525ab 2285 unsigned flags;
5262f567
JA
2286
2287 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2288 return -EINVAL;
a41525ab
JA
2289 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len != 1)
2290 return -EINVAL;
2291 flags = READ_ONCE(sqe->timeout_flags);
2292 if (flags & ~IORING_TIMEOUT_ABS)
5262f567 2293 return -EINVAL;
bdf20073
AB
2294
2295 if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
2296 return -EFAULT;
2297
11365043
JA
2298 if (flags & IORING_TIMEOUT_ABS)
2299 mode = HRTIMER_MODE_ABS;
2300 else
2301 mode = HRTIMER_MODE_REL;
2302
2303 hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, mode);
2304
5262f567
JA
2305 /*
 2306 	 * sqe->off holds how many events need to occur for this
2307 * timeout event to be satisfied.
2308 */
2309 count = READ_ONCE(sqe->off);
2310 if (!count)
2311 count = 1;
2312
2313 req->sequence = ctx->cached_sq_head + count - 1;
5da0fb1a 2314 /* reuse it to store the count */
2315 req->submit.sequence = count;
5262f567
JA
2316 req->flags |= REQ_F_TIMEOUT;
2317
2318 /*
2319 * Insertion sort, ensuring the first entry in the list is always
2320 * the one we need first.
2321 */
5262f567
JA
2322 spin_lock_irq(&ctx->completion_lock);
2323 list_for_each_prev(entry, &ctx->timeout_list) {
2324 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
5da0fb1a 2325 unsigned nxt_sq_head;
2326 long long tmp, tmp_nxt;
5262f567 2327
5da0fb1a 2328 /*
2329 * Since cached_sq_head + count - 1 can overflow, use type long
2330 * long to store it.
2331 */
2332 tmp = (long long)ctx->cached_sq_head + count - 1;
2333 nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
2334 tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
2335
2336 /*
 2337 		 * cached_sq_head may overflow, but it can never overflow twice
 2338 		 * while some timeout req is still valid.
2339 */
2340 if (ctx->cached_sq_head < nxt_sq_head)
8b07a65a 2341 tmp += UINT_MAX;
5da0fb1a 2342
a1f58ba4 2343 if (tmp > tmp_nxt)
5262f567 2344 break;
a1f58ba4 2345
2346 /*
 2347 		 * The sequence of the reqs after the insertion point, and of
 2348 		 * the inserted req itself, must be adjusted: each timeout req
 2349 		 * consumes a slot.
2349 */
2350 span++;
2351 nxt->sequence++;
5262f567 2352 }
a1f58ba4 2353 req->sequence -= span;
5262f567 2354 list_add(&req->list, entry);
5262f567 2355 req->timeout.timer.function = io_timeout_fn;
a41525ab 2356 hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts), mode);
842f9612 2357 spin_unlock_irq(&ctx->completion_lock);
5262f567
JA
2358 return 0;
2359}
2360
62755e35
JA
2361static bool io_cancel_cb(struct io_wq_work *work, void *data)
2362{
2363 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2364
2365 return req->user_data == (unsigned long) data;
2366}
2367
e977d6d3 2368static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
62755e35 2369{
62755e35 2370 enum io_wq_cancel cancel_ret;
62755e35
JA
2371 int ret = 0;
2372
62755e35
JA
2373 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
2374 switch (cancel_ret) {
2375 case IO_WQ_CANCEL_OK:
2376 ret = 0;
2377 break;
2378 case IO_WQ_CANCEL_RUNNING:
2379 ret = -EALREADY;
2380 break;
2381 case IO_WQ_CANCEL_NOTFOUND:
2382 ret = -ENOENT;
2383 break;
2384 }
2385
e977d6d3
JA
2386 return ret;
2387}
2388
47f46768
JA
2389static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
2390 struct io_kiocb *req, __u64 sqe_addr,
2391 struct io_kiocb **nxt)
2392{
2393 unsigned long flags;
2394 int ret;
2395
2396 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
2397 if (ret != -ENOENT) {
2398 spin_lock_irqsave(&ctx->completion_lock, flags);
2399 goto done;
2400 }
2401
2402 spin_lock_irqsave(&ctx->completion_lock, flags);
2403 ret = io_timeout_cancel(ctx, sqe_addr);
2404 if (ret != -ENOENT)
2405 goto done;
2406 ret = io_poll_cancel(ctx, sqe_addr);
2407done:
2408 io_cqring_fill_event(req, ret);
2409 io_commit_cqring(ctx);
2410 spin_unlock_irqrestore(&ctx->completion_lock, flags);
2411 io_cqring_ev_posted(ctx);
2412
2413 if (ret < 0 && (req->flags & REQ_F_LINK))
2414 req->flags |= REQ_F_FAIL_LINK;
2415 io_put_req_find_next(req, nxt);
2416}
2417
e977d6d3
JA
2418static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2419 struct io_kiocb **nxt)
2420{
2421 struct io_ring_ctx *ctx = req->ctx;
e977d6d3
JA
2422
2423 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2424 return -EINVAL;
2425 if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
2426 sqe->cancel_flags)
2427 return -EINVAL;
2428
47f46768 2429 io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), NULL);
62755e35
JA
2430 return 0;
2431}
2432
a197f664 2433static int io_req_defer(struct io_kiocb *req)
de0617e4 2434{
267bc904 2435 const struct io_uring_sqe *sqe = req->submit.sqe;
de0617e4 2436 struct io_uring_sqe *sqe_copy;
a197f664 2437 struct io_ring_ctx *ctx = req->ctx;
de0617e4 2438
9d858b21
BL
2439 /* Still need defer if there is pending req in defer list. */
2440 if (!req_need_defer(req) && list_empty(&ctx->defer_list))
de0617e4
JA
2441 return 0;
2442
2443 sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
2444 if (!sqe_copy)
2445 return -EAGAIN;
2446
2447 spin_lock_irq(&ctx->completion_lock);
9d858b21 2448 if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
de0617e4
JA
2449 spin_unlock_irq(&ctx->completion_lock);
2450 kfree(sqe_copy);
2451 return 0;
2452 }
2453
2454 memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
2455 req->submit.sqe = sqe_copy;
2456
c826bd7a 2457 trace_io_uring_defer(ctx, req, false);
de0617e4
JA
2458 list_add_tail(&req->list, &ctx->defer_list);
2459 spin_unlock_irq(&ctx->completion_lock);
2460 return -EIOCBQUEUED;
2461}
2462
a197f664
JL
2463static int __io_submit_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
2464 bool force_nonblock)
2b188cc1 2465{
e0c5c576 2466 int ret, opcode;
267bc904 2467 struct sqe_submit *s = &req->submit;
a197f664 2468 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 2469
2b188cc1
JA
2470 opcode = READ_ONCE(s->sqe->opcode);
2471 switch (opcode) {
2472 case IORING_OP_NOP:
78e19bbe 2473 ret = io_nop(req);
2b188cc1
JA
2474 break;
2475 case IORING_OP_READV:
edafccee
JA
2476 if (unlikely(s->sqe->buf_index))
2477 return -EINVAL;
267bc904 2478 ret = io_read(req, nxt, force_nonblock);
2b188cc1
JA
2479 break;
2480 case IORING_OP_WRITEV:
edafccee
JA
2481 if (unlikely(s->sqe->buf_index))
2482 return -EINVAL;
267bc904 2483 ret = io_write(req, nxt, force_nonblock);
edafccee
JA
2484 break;
2485 case IORING_OP_READ_FIXED:
267bc904 2486 ret = io_read(req, nxt, force_nonblock);
edafccee
JA
2487 break;
2488 case IORING_OP_WRITE_FIXED:
267bc904 2489 ret = io_write(req, nxt, force_nonblock);
2b188cc1 2490 break;
c992fe29 2491 case IORING_OP_FSYNC:
ba816ad6 2492 ret = io_fsync(req, s->sqe, nxt, force_nonblock);
c992fe29 2493 break;
221c5eb2 2494 case IORING_OP_POLL_ADD:
89723d0b 2495 ret = io_poll_add(req, s->sqe, nxt);
221c5eb2
JA
2496 break;
2497 case IORING_OP_POLL_REMOVE:
2498 ret = io_poll_remove(req, s->sqe);
2499 break;
5d17b4a4 2500 case IORING_OP_SYNC_FILE_RANGE:
ba816ad6 2501 ret = io_sync_file_range(req, s->sqe, nxt, force_nonblock);
5d17b4a4 2502 break;
0fa03c62 2503 case IORING_OP_SENDMSG:
ba816ad6 2504 ret = io_sendmsg(req, s->sqe, nxt, force_nonblock);
0fa03c62 2505 break;
aa1fa28f 2506 case IORING_OP_RECVMSG:
ba816ad6 2507 ret = io_recvmsg(req, s->sqe, nxt, force_nonblock);
aa1fa28f 2508 break;
5262f567
JA
2509 case IORING_OP_TIMEOUT:
2510 ret = io_timeout(req, s->sqe);
2511 break;
11365043
JA
2512 case IORING_OP_TIMEOUT_REMOVE:
2513 ret = io_timeout_remove(req, s->sqe);
2514 break;
17f2fe35
JA
2515 case IORING_OP_ACCEPT:
2516 ret = io_accept(req, s->sqe, nxt, force_nonblock);
2517 break;
62755e35
JA
2518 case IORING_OP_ASYNC_CANCEL:
2519 ret = io_async_cancel(req, s->sqe, nxt);
2520 break;
2b188cc1
JA
2521 default:
2522 ret = -EINVAL;
2523 break;
2524 }
2525
def596e9
JA
2526 if (ret)
2527 return ret;
2528
2529 if (ctx->flags & IORING_SETUP_IOPOLL) {
9e645e11 2530 if (req->result == -EAGAIN)
def596e9
JA
2531 return -EAGAIN;
2532
2533 /* workqueue context doesn't hold uring_lock, grab it now */
ba5290cc 2534 if (s->in_async)
def596e9
JA
2535 mutex_lock(&ctx->uring_lock);
2536 io_iopoll_req_issued(req);
ba5290cc 2537 if (s->in_async)
def596e9
JA
2538 mutex_unlock(&ctx->uring_lock);
2539 }
2540
2541 return 0;
2b188cc1
JA
2542}
2543
561fb04a 2544static void io_wq_submit_work(struct io_wq_work **workptr)
2b188cc1 2545{
561fb04a 2546 struct io_wq_work *work = *workptr;
2b188cc1 2547 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
561fb04a
JA
2548 struct sqe_submit *s = &req->submit;
2549 const struct io_uring_sqe *sqe = s->sqe;
2550 struct io_kiocb *nxt = NULL;
2551 int ret = 0;
2b188cc1 2552
561fb04a
JA
2553 /* Ensure we clear previously set non-block flag */
2554 req->rw.ki_flags &= ~IOCB_NOWAIT;
2b188cc1 2555
561fb04a
JA
2556 if (work->flags & IO_WQ_WORK_CANCEL)
2557 ret = -ECANCELED;
31b51510 2558
561fb04a
JA
2559 if (!ret) {
2560 s->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
2561 s->in_async = true;
2562 do {
a197f664 2563 ret = __io_submit_sqe(req, &nxt, false);
561fb04a
JA
2564 /*
2565 * We can get EAGAIN for polled IO even though we're
2566 * forcing a sync submission from here, since we can't
2567 * wait for request slots on the block side.
2568 */
2569 if (ret != -EAGAIN)
2570 break;
2571 cond_resched();
2572 } while (1);
2573 }
31b51510 2574
561fb04a 2575 /* drop submission reference */
ec9c02ad 2576 io_put_req(req);
817869d2 2577
561fb04a 2578 if (ret) {
f1f40853
JA
2579 if (req->flags & REQ_F_LINK)
2580 req->flags |= REQ_F_FAIL_LINK;
78e19bbe 2581 io_cqring_add_event(req, ret);
ec9c02ad 2582 io_put_req(req);
2b188cc1 2583 }
31b51510 2584
561fb04a
JA
2585 /* async context always use a copy of the sqe */
2586 kfree(sqe);
31b51510 2587
561fb04a
JA
2588 /* if a dependent link is ready, pass it back */
2589 if (!ret && nxt) {
2590 io_prep_async_work(nxt);
2591 *workptr = &nxt->work;
31b51510 2592 }
2b188cc1
JA
2593}
2594
09bb8394
JA
2595static bool io_op_needs_file(const struct io_uring_sqe *sqe)
2596{
2597 int op = READ_ONCE(sqe->opcode);
2598
2599 switch (op) {
2600 case IORING_OP_NOP:
2601 case IORING_OP_POLL_REMOVE:
a320e9fa
PB
2602 case IORING_OP_TIMEOUT:
2603 case IORING_OP_TIMEOUT_REMOVE:
2604 case IORING_OP_ASYNC_CANCEL:
2605 case IORING_OP_LINK_TIMEOUT:
09bb8394
JA
2606 return false;
2607 default:
2608 return true;
2609 }
2610}
2611
65e19f54
JA
2612static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
2613 int index)
2614{
2615 struct fixed_file_table *table;
2616
2617 table = &ctx->file_table[index >> IORING_FILE_TABLE_SHIFT];
2618 return table->files[index & IORING_FILE_TABLE_MASK];
2619}
2620
a197f664 2621static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
09bb8394 2622{
267bc904 2623 struct sqe_submit *s = &req->submit;
a197f664 2624 struct io_ring_ctx *ctx = req->ctx;
09bb8394
JA
2625 unsigned flags;
2626 int fd;
2627
2628 flags = READ_ONCE(s->sqe->flags);
2629 fd = READ_ONCE(s->sqe->fd);
2630
4fe2c963 2631 if (flags & IOSQE_IO_DRAIN)
de0617e4 2632 req->flags |= REQ_F_IO_DRAIN;
4fe2c963
JL
2633 /*
 2634 	 * All IO needs to record its previous position; for LINK vs DRAIN,
 2635 	 * it is used to mark the position of the first IO in the
 2636 	 * link list.
2637 */
2638 req->sequence = s->sequence;
de0617e4 2639
60c112b0 2640 if (!io_op_needs_file(s->sqe))
09bb8394 2641 return 0;
09bb8394
JA
2642
2643 if (flags & IOSQE_FIXED_FILE) {
65e19f54 2644 if (unlikely(!ctx->file_table ||
09bb8394
JA
2645 (unsigned) fd >= ctx->nr_user_files))
2646 return -EBADF;
b7620121 2647 fd = array_index_nospec(fd, ctx->nr_user_files);
65e19f54
JA
2648 req->file = io_file_from_index(ctx, fd);
2649 if (!req->file)
08a45173 2650 return -EBADF;
09bb8394
JA
2651 req->flags |= REQ_F_FIXED_FILE;
2652 } else {
2653 if (s->needs_fixed_file)
2654 return -EBADF;
c826bd7a 2655 trace_io_uring_file_get(ctx, fd);
09bb8394
JA
2656 req->file = io_file_get(state, fd);
2657 if (unlikely(!req->file))
2658 return -EBADF;
2659 }
2660
2661 return 0;
2662}
2663
a197f664 2664static int io_grab_files(struct io_kiocb *req)
fcb323cc
JA
2665{
2666 int ret = -EBADF;
a197f664 2667 struct io_ring_ctx *ctx = req->ctx;
fcb323cc
JA
2668
2669 rcu_read_lock();
2670 spin_lock_irq(&ctx->inflight_lock);
2671 /*
2672 * We use the f_ops->flush() handler to ensure that we can flush
2673 * out work accessing these files if the fd is closed. Check if
2674 * the fd has changed since we started down this path, and disallow
2675 * this operation if it has.
2676 */
2677 if (fcheck(req->submit.ring_fd) == req->submit.ring_file) {
2678 list_add(&req->inflight_entry, &ctx->inflight_list);
2679 req->flags |= REQ_F_INFLIGHT;
2680 req->work.files = current->files;
2681 ret = 0;
2682 }
2683 spin_unlock_irq(&ctx->inflight_lock);
2684 rcu_read_unlock();
2685
2686 return ret;
2687}
2688
2665abfd
JA
2689static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2690{
2691 struct io_kiocb *req = container_of(timer, struct io_kiocb,
2692 timeout.timer);
2693 struct io_ring_ctx *ctx = req->ctx;
2694 struct io_kiocb *prev = NULL;
2695 unsigned long flags;
2665abfd
JA
2696
2697 spin_lock_irqsave(&ctx->completion_lock, flags);
2698
2699 /*
 2700 	 * We don't expect the list to be empty; that will only happen if we
2701 * race with the completion of the linked work.
2702 */
2703 if (!list_empty(&req->list)) {
2704 prev = list_entry(req->list.prev, struct io_kiocb, link_list);
76a46e06
JA
2705 if (refcount_inc_not_zero(&prev->refs))
2706 list_del_init(&req->list);
2707 else
2708 prev = NULL;
2665abfd
JA
2709 }
2710
2711 spin_unlock_irqrestore(&ctx->completion_lock, flags);
2712
2713 if (prev) {
47f46768 2714 io_async_find_and_cancel(ctx, req, prev->user_data, NULL);
76a46e06 2715 io_put_req(prev);
47f46768
JA
2716 } else {
2717 io_cqring_add_event(req, -ETIME);
2718 io_put_req(req);
2665abfd 2719 }
2665abfd
JA
2720 return HRTIMER_NORESTART;
2721}
2722
76a46e06
JA
2723static void io_queue_linked_timeout(struct io_kiocb *req, struct timespec64 *ts,
2724 enum hrtimer_mode *mode)
2665abfd 2725{
76a46e06 2726 struct io_ring_ctx *ctx = req->ctx;
2665abfd 2727
76a46e06
JA
2728 /*
2729 * If the list is now empty, then our linked request finished before
 2730 	 * we got a chance to set up the timer.
2731 */
2732 spin_lock_irq(&ctx->completion_lock);
2733 if (!list_empty(&req->list)) {
2734 req->timeout.timer.function = io_link_timeout_fn;
2735 hrtimer_start(&req->timeout.timer, timespec64_to_ktime(*ts),
2736 *mode);
2665abfd 2737 }
76a46e06 2738 spin_unlock_irq(&ctx->completion_lock);
2665abfd 2739
2665abfd 2740 /* drop submission reference */
76a46e06
JA
2741 io_put_req(req);
2742}
2665abfd 2743
76a46e06
JA
2744static int io_validate_link_timeout(const struct io_uring_sqe *sqe,
2745 struct timespec64 *ts)
2746{
2747 if (sqe->ioprio || sqe->buf_index || sqe->len != 1 || sqe->off)
2748 return -EINVAL;
2749 if (sqe->timeout_flags & ~IORING_TIMEOUT_ABS)
2750 return -EINVAL;
2751 if (get_timespec64(ts, u64_to_user_ptr(sqe->addr)))
2752 return -EFAULT;
2665abfd 2753
76a46e06 2754 return 0;
2665abfd
JA
2755}
2756
76a46e06
JA
2757static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req,
2758 struct timespec64 *ts,
2759 enum hrtimer_mode *mode)
2665abfd
JA
2760{
2761 struct io_kiocb *nxt;
76a46e06 2762 int ret;
2665abfd
JA
2763
2764 if (!(req->flags & REQ_F_LINK))
2765 return NULL;
2766
2767 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
76a46e06
JA
2768 if (!nxt || nxt->submit.sqe->opcode != IORING_OP_LINK_TIMEOUT)
2769 return NULL;
2665abfd 2770
76a46e06
JA
2771 ret = io_validate_link_timeout(nxt->submit.sqe, ts);
2772 if (ret) {
2773 list_del_init(&nxt->list);
2774 io_cqring_add_event(nxt, ret);
2775 io_double_put_req(nxt);
2776 return ERR_PTR(-ECANCELED);
2777 }
2778
2779 if (nxt->submit.sqe->timeout_flags & IORING_TIMEOUT_ABS)
2780 *mode = HRTIMER_MODE_ABS;
2781 else
2782 *mode = HRTIMER_MODE_REL;
2783
2784 req->flags |= REQ_F_LINK_TIMEOUT;
2785 hrtimer_init(&nxt->timeout.timer, CLOCK_MONOTONIC, *mode);
2786 return nxt;
2665abfd
JA
2787}
2788
a197f664 2789static int __io_queue_sqe(struct io_kiocb *req)
2b188cc1 2790{
76a46e06 2791 enum hrtimer_mode mode;
2665abfd 2792 struct io_kiocb *nxt;
76a46e06 2793 struct timespec64 ts;
e0c5c576 2794 int ret;
2b188cc1 2795
76a46e06
JA
2796 nxt = io_prep_linked_timeout(req, &ts, &mode);
2797 if (IS_ERR(nxt)) {
2798 ret = PTR_ERR(nxt);
2799 nxt = NULL;
2800 goto err;
2665abfd
JA
2801 }
2802
a197f664 2803 ret = __io_submit_sqe(req, NULL, true);
491381ce
JA
2804
2805 /*
2806 * We async punt it if the file wasn't marked NOWAIT, or if the file
2807 * doesn't support non-blocking read/write attempts
2808 */
2809 if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
2810 (req->flags & REQ_F_MUST_PUNT))) {
267bc904 2811 struct sqe_submit *s = &req->submit;
2b188cc1
JA
2812 struct io_uring_sqe *sqe_copy;
2813
954dab19 2814 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2b188cc1 2815 if (sqe_copy) {
2b188cc1 2816 s->sqe = sqe_copy;
fcb323cc 2817 if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
a197f664 2818 ret = io_grab_files(req);
fcb323cc
JA
2819 if (ret) {
2820 kfree(sqe_copy);
2821 goto err;
2822 }
2823 }
e65ef56d
JA
2824
2825 /*
 2826 			 * Queued up for async execution; the worker will release
9e645e11 2827 * submit reference when the iocb is actually submitted.
e65ef56d 2828 */
a197f664 2829 io_queue_async_work(req);
76a46e06
JA
2830
2831 if (nxt)
2832 io_queue_linked_timeout(nxt, &ts, &mode);
2833
e65ef56d 2834 return 0;
2b188cc1
JA
2835 }
2836 }
e65ef56d 2837
fcb323cc 2838err:
76a46e06 2839 /* drop submission reference */
ec9c02ad 2840 io_put_req(req);
e65ef56d 2841
76a46e06
JA
2842 if (nxt) {
2843 if (!ret)
2844 io_queue_linked_timeout(nxt, &ts, &mode);
2845 else
2846 io_put_req(nxt);
2847 }
2848
e65ef56d 2849 /* and drop final reference, if we failed */
9e645e11 2850 if (ret) {
78e19bbe 2851 io_cqring_add_event(req, ret);
9e645e11
JA
2852 if (req->flags & REQ_F_LINK)
2853 req->flags |= REQ_F_FAIL_LINK;
ec9c02ad 2854 io_put_req(req);
9e645e11 2855 }
2b188cc1
JA
2856
2857 return ret;
2858}
2859
a197f664 2860static int io_queue_sqe(struct io_kiocb *req)
4fe2c963
JL
2861{
2862 int ret;
2863
a197f664 2864 ret = io_req_defer(req);
4fe2c963
JL
2865 if (ret) {
2866 if (ret != -EIOCBQUEUED) {
78e19bbe
JA
2867 io_cqring_add_event(req, ret);
2868 io_double_put_req(req);
4fe2c963
JL
2869 }
2870 return 0;
2871 }
2872
a197f664 2873 return __io_queue_sqe(req);
4fe2c963
JL
2874}
2875
a197f664 2876static int io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
4fe2c963
JL
2877{
2878 int ret;
2879 int need_submit = false;
a197f664 2880 struct io_ring_ctx *ctx = req->ctx;
4fe2c963
JL
2881
2882 if (!shadow)
a197f664 2883 return io_queue_sqe(req);
4fe2c963
JL
2884
2885 /*
 2886 	 * Mark the first IO in the link list as DRAIN and let all the
 2887 	 * following IOs enter the defer list. All IO must complete before
 2888 	 * the link list runs.
2889 */
2890 req->flags |= REQ_F_IO_DRAIN;
a197f664 2891 ret = io_req_defer(req);
4fe2c963
JL
2892 if (ret) {
2893 if (ret != -EIOCBQUEUED) {
78e19bbe
JA
2894 io_cqring_add_event(req, ret);
2895 io_double_put_req(req);
7b20238d 2896 __io_free_req(shadow);
4fe2c963
JL
2897 return 0;
2898 }
2899 } else {
2900 /*
 2901 		 * ret == 0 means that all IOs in front of the link IO have
 2902 		 * completed, so let's queue the link head.
2903 */
2904 need_submit = true;
2905 }
2906
2907 /* Insert shadow req to defer_list, blocking next IOs */
2908 spin_lock_irq(&ctx->completion_lock);
c826bd7a 2909 trace_io_uring_defer(ctx, shadow, true);
4fe2c963
JL
2910 list_add_tail(&shadow->list, &ctx->defer_list);
2911 spin_unlock_irq(&ctx->completion_lock);
2912
2913 if (need_submit)
a197f664 2914 return __io_queue_sqe(req);
4fe2c963
JL
2915
2916 return 0;
2917}
2918
9e645e11
JA
2919#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
2920
a197f664
JL
2921static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
2922 struct io_kiocb **link)
9e645e11
JA
2923{
2924 struct io_uring_sqe *sqe_copy;
267bc904 2925 struct sqe_submit *s = &req->submit;
a197f664 2926 struct io_ring_ctx *ctx = req->ctx;
9e645e11
JA
2927 int ret;
2928
78e19bbe
JA
2929 req->user_data = s->sqe->user_data;
2930
9e645e11
JA
2931 /* enforce forwards compatibility on users */
2932 if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
2933 ret = -EINVAL;
196be95c 2934 goto err_req;
9e645e11
JA
2935 }
2936
a197f664 2937 ret = io_req_set_file(state, req);
9e645e11
JA
2938 if (unlikely(ret)) {
2939err_req:
78e19bbe
JA
2940 io_cqring_add_event(req, ret);
2941 io_double_put_req(req);
9e645e11
JA
2942 return;
2943 }
2944
9e645e11
JA
2945 /*
2946 * If we already have a head request, queue this one for async
2947 * submittal once the head completes. If we don't have a head but
2948 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2949 * submitted sync once the chain is complete. If none of those
2950 * conditions are true (normal request), then just queue it.
2951 */
2952 if (*link) {
2953 struct io_kiocb *prev = *link;
2954
2955 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2956 if (!sqe_copy) {
2957 ret = -EAGAIN;
2958 goto err_req;
2959 }
2960
2961 s->sqe = sqe_copy;
c826bd7a 2962 trace_io_uring_link(ctx, req, prev);
9e645e11
JA
2963 list_add_tail(&req->list, &prev->link_list);
2964 } else if (s->sqe->flags & IOSQE_IO_LINK) {
2965 req->flags |= REQ_F_LINK;
2966
9e645e11
JA
2967 INIT_LIST_HEAD(&req->link_list);
2968 *link = req;
2665abfd
JA
2969 } else if (READ_ONCE(s->sqe->opcode) == IORING_OP_LINK_TIMEOUT) {
2970 /* Only valid as a linked SQE */
2971 ret = -EINVAL;
2972 goto err_req;
9e645e11 2973 } else {
a197f664 2974 io_queue_sqe(req);
9e645e11
JA
2975 }
2976}
2977
9a56a232
JA
2978/*
2979 * Batched submission is done, ensure local IO is flushed out.
2980 */
2981static void io_submit_state_end(struct io_submit_state *state)
2982{
2983 blk_finish_plug(&state->plug);
3d6770fb 2984 io_file_put(state);
2579f913
JA
2985 if (state->free_reqs)
2986 kmem_cache_free_bulk(req_cachep, state->free_reqs,
2987 &state->reqs[state->cur_req]);
9a56a232
JA
2988}
2989
2990/*
2991 * Start submission side cache.
2992 */
2993static void io_submit_state_start(struct io_submit_state *state,
2994 struct io_ring_ctx *ctx, unsigned max_ios)
2995{
2996 blk_start_plug(&state->plug);
2579f913 2997 state->free_reqs = 0;
9a56a232
JA
2998 state->file = NULL;
2999 state->ios_left = max_ios;
3000}
3001
2b188cc1
JA
3002static void io_commit_sqring(struct io_ring_ctx *ctx)
3003{
75b28aff 3004 struct io_rings *rings = ctx->rings;
2b188cc1 3005
75b28aff 3006 if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
2b188cc1
JA
3007 /*
3008 * Ensure any loads from the SQEs are done at this point,
3009 * since once we write the new head, the application could
3010 * write new data to them.
3011 */
75b28aff 3012 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
3013 }
3014}
3015
2b188cc1
JA
3016/*
3017 * Fetch an sqe, if one is available. Note that s->sqe will point to memory
3018 * that is mapped by userspace. This means that care needs to be taken to
3019 * ensure that reads are stable, as we cannot rely on userspace always
3020 * being a good citizen. If members of the sqe are validated and then later
3021 * used, it's important that those reads are done through READ_ONCE() to
3022 * prevent a re-load down the line.
3023 */
3024static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
3025{
75b28aff
HV
3026 struct io_rings *rings = ctx->rings;
3027 u32 *sq_array = ctx->sq_array;
2b188cc1
JA
3028 unsigned head;
3029
3030 /*
3031 * The cached sq head (or cq tail) serves two purposes:
3032 *
 3033 	 * 1) allows us to batch the cost of updating the
 3034 	 *    user-visible head.
3035 * 2) allows the kernel side to track the head on its own, even
3036 * though the application is the one updating it.
3037 */
3038 head = ctx->cached_sq_head;
e523a29c 3039 /* make sure SQ entry isn't read before tail */
75b28aff 3040 if (head == smp_load_acquire(&rings->sq.tail))
2b188cc1
JA
3041 return false;
3042
75b28aff 3043 head = READ_ONCE(sq_array[head & ctx->sq_mask]);
2b188cc1 3044 if (head < ctx->sq_entries) {
fcb323cc 3045 s->ring_file = NULL;
2b188cc1 3046 s->sqe = &ctx->sq_sqes[head];
8776f3fa 3047 s->sequence = ctx->cached_sq_head;
2b188cc1
JA
3048 ctx->cached_sq_head++;
3049 return true;
3050 }
3051
3052 /* drop invalid entries */
3053 ctx->cached_sq_head++;
498ccd9e
JA
3054 ctx->cached_sq_dropped++;
3055 WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
2b188cc1
JA
3056 return false;
3057}
3058
fb5ccc98 3059static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
ae9428ca
PB
3060 struct file *ring_file, int ring_fd,
3061 struct mm_struct **mm, bool async)
6c271ce2
JA
3062{
3063 struct io_submit_state state, *statep = NULL;
9e645e11 3064 struct io_kiocb *link = NULL;
4fe2c963 3065 struct io_kiocb *shadow_req = NULL;
9e645e11 3066 int i, submitted = 0;
95a1b3ff 3067 bool mm_fault = false;
6c271ce2 3068
1d7bb1d5
JA
3069 if (!list_empty(&ctx->cq_overflow_list)) {
3070 io_cqring_overflow_flush(ctx, false);
3071 return -EBUSY;
3072 }
3073
6c271ce2
JA
3074 if (nr > IO_PLUG_THRESHOLD) {
3075 io_submit_state_start(&state, ctx, nr);
3076 statep = &state;
3077 }
3078
3079 for (i = 0; i < nr; i++) {
196be95c 3080 struct io_kiocb *req;
50585b9a 3081 unsigned int sqe_flags;
fb5ccc98 3082
196be95c
PB
3083 req = io_get_req(ctx, statep);
3084 if (unlikely(!req)) {
3085 if (!submitted)
3086 submitted = -EAGAIN;
fb5ccc98 3087 break;
196be95c 3088 }
50585b9a 3089 if (!io_get_sqring(ctx, &req->submit)) {
196be95c
PB
3090 __io_free_req(req);
3091 break;
3092 }
fb5ccc98 3093
50585b9a 3094 if (io_sqe_needs_user(req->submit.sqe) && !*mm) {
95a1b3ff
PB
3095 mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
3096 if (!mm_fault) {
3097 use_mm(ctx->sqo_mm);
3098 *mm = ctx->sqo_mm;
3099 }
3100 }
3101
50585b9a
PB
3102 sqe_flags = req->submit.sqe->flags;
3103
3104 if (link && (sqe_flags & IOSQE_IO_DRAIN)) {
4fe2c963
JL
3105 if (!shadow_req) {
3106 shadow_req = io_get_req(ctx, NULL);
a1041c27
JL
3107 if (unlikely(!shadow_req))
3108 goto out;
4fe2c963
JL
3109 shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
3110 refcount_dec(&shadow_req->refs);
3111 }
50585b9a 3112 shadow_req->sequence = req->submit.sequence;
4fe2c963
JL
3113 }
3114
a1041c27 3115out:
50585b9a
PB
3116 req->submit.ring_file = ring_file;
3117 req->submit.ring_fd = ring_fd;
3118 req->submit.has_user = *mm != NULL;
3119 req->submit.in_async = async;
3120 req->submit.needs_fixed_file = async;
3121 trace_io_uring_submit_sqe(ctx, req->submit.sqe->user_data,
3122 true, async);
a197f664 3123 io_submit_sqe(req, statep, &link);
95a1b3ff 3124 submitted++;
e5eb6366
PB
3125
3126 /*
3127 * If previous wasn't linked and we have a linked command,
3128 * that's the end of the chain. Submit the previous link.
3129 */
50585b9a 3130 if (!(sqe_flags & IOSQE_IO_LINK) && link) {
a197f664 3131 io_queue_link_head(link, shadow_req);
e5eb6366
PB
3132 link = NULL;
3133 shadow_req = NULL;
3134 }
6c271ce2
JA
3135 }
3136
9e645e11 3137 if (link)
a197f664 3138 io_queue_link_head(link, shadow_req);
6c271ce2
JA
3139 if (statep)
3140 io_submit_state_end(&state);
3141
ae9428ca
PB
3142 /* Commit SQ ring head once we've consumed and submitted all SQEs */
3143 io_commit_sqring(ctx);
3144
6c271ce2
JA
3145 return submitted;
3146}
3147
3148static int io_sq_thread(void *data)
3149{
6c271ce2
JA
3150 struct io_ring_ctx *ctx = data;
3151 struct mm_struct *cur_mm = NULL;
3152 mm_segment_t old_fs;
3153 DEFINE_WAIT(wait);
3154 unsigned inflight;
3155 unsigned long timeout;
c1edbf5f 3156 int ret;
6c271ce2 3157
206aefde 3158 complete(&ctx->completions[1]);
a4c0b3de 3159
6c271ce2
JA
3160 old_fs = get_fs();
3161 set_fs(USER_DS);
3162
c1edbf5f 3163 ret = timeout = inflight = 0;
2bbcd6d3 3164 while (!kthread_should_park()) {
fb5ccc98 3165 unsigned int to_submit;
6c271ce2
JA
3166
3167 if (inflight) {
3168 unsigned nr_events = 0;
3169
3170 if (ctx->flags & IORING_SETUP_IOPOLL) {
2b2ed975
JA
3171 /*
3172 * inflight is the count of the maximum possible
3173 * entries we submitted, but it can be smaller
3174 * if we dropped some of them. If we don't have
3175 * poll entries available, then we know that we
3176 * have nothing left to poll for. Reset the
3177 * inflight count to zero in that case.
3178 */
3179 mutex_lock(&ctx->uring_lock);
3180 if (!list_empty(&ctx->poll_list))
3181 __io_iopoll_check(ctx, &nr_events, 0);
3182 else
3183 inflight = 0;
3184 mutex_unlock(&ctx->uring_lock);
6c271ce2
JA
3185 } else {
3186 /*
3187 * Normal IO, just pretend everything completed.
3188 * We don't have to poll completions for that.
3189 */
3190 nr_events = inflight;
3191 }
3192
3193 inflight -= nr_events;
3194 if (!inflight)
3195 timeout = jiffies + ctx->sq_thread_idle;
3196 }
3197
fb5ccc98 3198 to_submit = io_sqring_entries(ctx);
c1edbf5f
JA
3199
3200 /*
3201 * If submit got -EBUSY, flag us as needing the application
3202 * to enter the kernel to reap and flush events.
3203 */
3204 if (!to_submit || ret == -EBUSY) {
6c271ce2
JA
3205 /*
3206 * We're polling. If we're within the defined idle
3207 * period, then let us spin without work before going
c1edbf5f
JA
 3208 			 * to sleep. The exception is if we got -EBUSY while
 3209 			 * submitting more IO; then we should wait for the
 3210 			 * application to reap events and wake us up.
6c271ce2 3211 */
c1edbf5f
JA
3212 if (inflight ||
3213 (!time_after(jiffies, timeout) && ret != -EBUSY)) {
9831a90c 3214 cond_resched();
6c271ce2
JA
3215 continue;
3216 }
3217
3218 /*
3219 * Drop cur_mm before scheduling, we can't hold it for
3220 * long periods (or over schedule()). Do this before
3221 * adding ourselves to the waitqueue, as the unuse/drop
3222 * may sleep.
3223 */
3224 if (cur_mm) {
3225 unuse_mm(cur_mm);
3226 mmput(cur_mm);
3227 cur_mm = NULL;
3228 }
3229
3230 prepare_to_wait(&ctx->sqo_wait, &wait,
3231 TASK_INTERRUPTIBLE);
3232
3233 /* Tell userspace we may need a wakeup call */
75b28aff 3234 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
0d7bae69
SB
3235 /* make sure to read SQ tail after writing flags */
3236 smp_mb();
6c271ce2 3237
fb5ccc98 3238 to_submit = io_sqring_entries(ctx);
c1edbf5f 3239 if (!to_submit || ret == -EBUSY) {
2bbcd6d3 3240 if (kthread_should_park()) {
6c271ce2
JA
3241 finish_wait(&ctx->sqo_wait, &wait);
3242 break;
3243 }
3244 if (signal_pending(current))
3245 flush_signals(current);
3246 schedule();
3247 finish_wait(&ctx->sqo_wait, &wait);
3248
75b28aff 3249 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6c271ce2
JA
3250 continue;
3251 }
3252 finish_wait(&ctx->sqo_wait, &wait);
3253
75b28aff 3254 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6c271ce2
JA
3255 }
3256
fb5ccc98 3257 to_submit = min(to_submit, ctx->sq_entries);
1d7bb1d5
JA
3258 ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
3259 if (ret > 0)
3260 inflight += ret;
6c271ce2
JA
3261 }
3262
3263 set_fs(old_fs);
3264 if (cur_mm) {
3265 unuse_mm(cur_mm);
3266 mmput(cur_mm);
3267 }
06058632 3268
2bbcd6d3 3269 kthread_parkme();
06058632 3270
6c271ce2
JA
3271 return 0;
3272}
3273
bda52162
JA
3274struct io_wait_queue {
3275 struct wait_queue_entry wq;
3276 struct io_ring_ctx *ctx;
3277 unsigned to_wait;
3278 unsigned nr_timeouts;
3279};
3280
1d7bb1d5 3281static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
bda52162
JA
3282{
3283 struct io_ring_ctx *ctx = iowq->ctx;
3284
3285 /*
 3286 	 * Wake up if we have enough events, or if a timeout occurred since we
3287 * started waiting. For timeouts, we always want to return to userspace,
3288 * regardless of event count.
3289 */
1d7bb1d5 3290 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
bda52162
JA
3291 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
3292}
3293
3294static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
3295 int wake_flags, void *key)
3296{
3297 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
3298 wq);
3299
1d7bb1d5
JA
3300 /* use noflush == true, as we can't safely rely on locking context */
3301 if (!io_should_wake(iowq, true))
bda52162
JA
3302 return -1;
3303
3304 return autoremove_wake_function(curr, mode, wake_flags, key);
3305}
3306
2b188cc1
JA
3307/*
3308 * Wait until events become available, if we don't already have some. The
3309 * application must reap them itself, as they reside on the shared cq ring.
3310 */
3311static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
3312 const sigset_t __user *sig, size_t sigsz)
3313{
bda52162
JA
3314 struct io_wait_queue iowq = {
3315 .wq = {
3316 .private = current,
3317 .func = io_wake_function,
3318 .entry = LIST_HEAD_INIT(iowq.wq.entry),
3319 },
3320 .ctx = ctx,
3321 .to_wait = min_events,
3322 };
75b28aff 3323 struct io_rings *rings = ctx->rings;
e9ffa5c2 3324 int ret = 0;
2b188cc1 3325
1d7bb1d5 3326 if (io_cqring_events(ctx, false) >= min_events)
2b188cc1
JA
3327 return 0;
3328
3329 if (sig) {
9e75ad5d
AB
3330#ifdef CONFIG_COMPAT
3331 if (in_compat_syscall())
3332 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 3333 sigsz);
9e75ad5d
AB
3334 else
3335#endif
b772434b 3336 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 3337
2b188cc1
JA
3338 if (ret)
3339 return ret;
3340 }
3341
bda52162 3342 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
c826bd7a 3343 trace_io_uring_cqring_wait(ctx, min_events);
bda52162
JA
3344 do {
3345 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
3346 TASK_INTERRUPTIBLE);
1d7bb1d5 3347 if (io_should_wake(&iowq, false))
bda52162
JA
3348 break;
3349 schedule();
3350 if (signal_pending(current)) {
e9ffa5c2 3351 ret = -EINTR;
bda52162
JA
3352 break;
3353 }
3354 } while (1);
3355 finish_wait(&ctx->wait, &iowq.wq);
3356
e9ffa5c2 3357 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 3358
75b28aff 3359 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
3360}
3361
6b06314c
JA
3362static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
3363{
3364#if defined(CONFIG_UNIX)
3365 if (ctx->ring_sock) {
3366 struct sock *sock = ctx->ring_sock->sk;
3367 struct sk_buff *skb;
3368
3369 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
3370 kfree_skb(skb);
3371 }
3372#else
3373 int i;
3374
65e19f54
JA
3375 for (i = 0; i < ctx->nr_user_files; i++) {
3376 struct file *file;
3377
3378 file = io_file_from_index(ctx, i);
3379 if (file)
3380 fput(file);
3381 }
6b06314c
JA
3382#endif
3383}
3384
3385static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
3386{
65e19f54
JA
3387 unsigned nr_tables, i;
3388
3389 if (!ctx->file_table)
6b06314c
JA
3390 return -ENXIO;
3391
3392 __io_sqe_files_unregister(ctx);
65e19f54
JA
3393 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
3394 for (i = 0; i < nr_tables; i++)
3395 kfree(ctx->file_table[i].files);
3396 kfree(ctx->file_table);
3397 ctx->file_table = NULL;
6b06314c
JA
3398 ctx->nr_user_files = 0;
3399 return 0;
3400}
3401
6c271ce2
JA
3402static void io_sq_thread_stop(struct io_ring_ctx *ctx)
3403{
3404 if (ctx->sqo_thread) {
206aefde 3405 wait_for_completion(&ctx->completions[1]);
2bbcd6d3
RP
3406 /*
3407 * The park is a bit of a work-around, without it we get
3408 * warning spews on shutdown with SQPOLL set and affinity
3409 * set to a single CPU.
3410 */
06058632 3411 kthread_park(ctx->sqo_thread);
6c271ce2
JA
3412 kthread_stop(ctx->sqo_thread);
3413 ctx->sqo_thread = NULL;
3414 }
3415}
3416
6b06314c
JA
3417static void io_finish_async(struct io_ring_ctx *ctx)
3418{
6c271ce2
JA
3419 io_sq_thread_stop(ctx);
3420
561fb04a
JA
3421 if (ctx->io_wq) {
3422 io_wq_destroy(ctx->io_wq);
3423 ctx->io_wq = NULL;
6b06314c
JA
3424 }
3425}
3426
3427#if defined(CONFIG_UNIX)
3428static void io_destruct_skb(struct sk_buff *skb)
3429{
3430 struct io_ring_ctx *ctx = skb->sk->sk_user_data;
8a997340 3431
561fb04a
JA
3432 if (ctx->io_wq)
3433 io_wq_flush(ctx->io_wq);
6b06314c 3434
6b06314c
JA
3435 unix_destruct_scm(skb);
3436}
3437
3438/*
3439 * Ensure the UNIX gc is aware of our file set, so we are certain that
3440 * the io_uring can be safely unregistered on process exit, even if we have
3441 * loops in the file referencing.
3442 */
3443static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
3444{
3445 struct sock *sk = ctx->ring_sock->sk;
3446 struct scm_fp_list *fpl;
3447 struct sk_buff *skb;
08a45173 3448 int i, nr_files;
6b06314c
JA
3449
3450 if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
3451 unsigned long inflight = ctx->user->unix_inflight + nr;
3452
3453 if (inflight > task_rlimit(current, RLIMIT_NOFILE))
3454 return -EMFILE;
3455 }
3456
3457 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
3458 if (!fpl)
3459 return -ENOMEM;
3460
3461 skb = alloc_skb(0, GFP_KERNEL);
3462 if (!skb) {
3463 kfree(fpl);
3464 return -ENOMEM;
3465 }
3466
3467 skb->sk = sk;
6b06314c 3468
08a45173 3469 nr_files = 0;
6b06314c
JA
3470 fpl->user = get_uid(ctx->user);
3471 for (i = 0; i < nr; i++) {
65e19f54
JA
3472 struct file *file = io_file_from_index(ctx, i + offset);
3473
3474 if (!file)
08a45173 3475 continue;
65e19f54 3476 fpl->fp[nr_files] = get_file(file);
08a45173
JA
3477 unix_inflight(fpl->user, fpl->fp[nr_files]);
3478 nr_files++;
6b06314c
JA
3479 }
3480
08a45173
JA
3481 if (nr_files) {
3482 fpl->max = SCM_MAX_FD;
3483 fpl->count = nr_files;
3484 UNIXCB(skb).fp = fpl;
3485 skb->destructor = io_destruct_skb;
3486 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
3487 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 3488
08a45173
JA
3489 for (i = 0; i < nr_files; i++)
3490 fput(fpl->fp[i]);
3491 } else {
3492 kfree_skb(skb);
3493 kfree(fpl);
3494 }
6b06314c
JA
3495
3496 return 0;
3497}
3498
3499/*
3500 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
3501 * causes regular reference counting to break down. We rely on the UNIX
3502 * garbage collection to take care of this problem for us.
3503 */
3504static int io_sqe_files_scm(struct io_ring_ctx *ctx)
3505{
3506 unsigned left, total;
3507 int ret = 0;
3508
3509 total = 0;
3510 left = ctx->nr_user_files;
3511 while (left) {
3512 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
3513
3514 ret = __io_sqe_files_scm(ctx, this_files, total);
3515 if (ret)
3516 break;
3517 left -= this_files;
3518 total += this_files;
3519 }
3520
3521 if (!ret)
3522 return 0;
3523
3524 while (total < ctx->nr_user_files) {
65e19f54
JA
3525 struct file *file = io_file_from_index(ctx, total);
3526
3527 if (file)
3528 fput(file);
6b06314c
JA
3529 total++;
3530 }
3531
3532 return ret;
3533}
3534#else
3535static int io_sqe_files_scm(struct io_ring_ctx *ctx)
3536{
3537 return 0;
3538}
3539#endif
3540
65e19f54
JA
3541static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
3542 unsigned nr_files)
3543{
3544 int i;
3545
3546 for (i = 0; i < nr_tables; i++) {
3547 struct fixed_file_table *table = &ctx->file_table[i];
3548 unsigned this_files;
3549
3550 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
3551 table->files = kcalloc(this_files, sizeof(struct file *),
3552 GFP_KERNEL);
3553 if (!table->files)
3554 break;
3555 nr_files -= this_files;
3556 }
3557
3558 if (i == nr_tables)
3559 return 0;
3560
3561 for (i = 0; i < nr_tables; i++) {
3562 struct fixed_file_table *table = &ctx->file_table[i];
3563 kfree(table->files);
3564 }
3565 return 1;
3566}
3567
6b06314c
JA
static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
				 unsigned nr_args)
{
	__s32 __user *fds = (__s32 __user *) arg;
	unsigned nr_tables;
	int fd, ret = 0;
	unsigned i;

	if (ctx->file_table)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;

	nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
	ctx->file_table = kcalloc(nr_tables, sizeof(struct fixed_file_table),
					GFP_KERNEL);
	if (!ctx->file_table)
		return -ENOMEM;

	if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
		kfree(ctx->file_table);
		ctx->file_table = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct fixed_file_table *table;
		unsigned index;

		ret = -EFAULT;
		if (copy_from_user(&fd, &fds[i], sizeof(fd)))
			break;
		/* allow sparse sets */
		if (fd == -1) {
			ret = 0;
			continue;
		}

		table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
		index = i & IORING_FILE_TABLE_MASK;
		table->files[index] = fget(fd);

		ret = -EBADF;
		if (!table->files[index])
			break;
		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (table->files[index]->f_op == &io_uring_fops) {
			fput(table->files[index]);
			break;
		}
		ret = 0;
	}

	if (ret) {
		for (i = 0; i < ctx->nr_user_files; i++) {
			struct file *file;

			file = io_file_from_index(ctx, i);
			if (file)
				fput(file);
		}
		for (i = 0; i < nr_tables; i++)
			kfree(ctx->file_table[i].files);

		kfree(ctx->file_table);
		ctx->file_table = NULL;
		ctx->nr_user_files = 0;
		return ret;
	}

	ret = io_sqe_files_scm(ctx);
	if (ret)
		io_sqe_files_unregister(ctx);

	return ret;
}

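/*
 * Remove one registered file: find the SCM_RIGHTS skb on the ring socket
 * that holds a reference to it, drop the file from that set, and requeue
 * the remaining skbs. Without CONFIG_UNIX a plain fput() suffices.
 */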
static void io_sqe_file_unregister(struct io_ring_ctx *ctx, int index)
{
#if defined(CONFIG_UNIX)
	struct file *file = io_file_from_index(ctx, index);
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
						left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#else
	fput(io_file_from_index(ctx, index));
#endif
}

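/*
 * Account a newly registered file with the UNIX garbage collector: merge it
 * into an existing SCM_RIGHTS skb if there is room, else fall back to
 * allocating a fresh one via __io_sqe_files_scm().
 */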
static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
				int index)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sock->sk_receive_queue;
	struct sk_buff *skb;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb) {
		struct scm_fp_list *fpl = UNIXCB(skb).fp;

		if (fpl->count < SCM_MAX_FD) {
			__skb_unlink(skb, head);
			spin_unlock_irq(&head->lock);
			fpl->fp[fpl->count] = get_file(file);
			unix_inflight(fpl->user, fpl->fp[fpl->count]);
			fpl->count++;
			spin_lock_irq(&head->lock);
			__skb_queue_head(head, skb);
		} else {
			skb = NULL;
		}
	}
	spin_unlock_irq(&head->lock);

	if (skb) {
		fput(file);
		return 0;
	}

	return __io_sqe_files_scm(ctx, 1, index);
#else
	return 0;
#endif
}

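/*
 * Update registered files in place (IORING_REGISTER_FILES_UPDATE): starting
 * at up.offset, drop the old file in each slot (if any) and install the new
 * one, with an fd of -1 leaving the slot sparse. Returns the number of slots
 * updated, or an error if the first update failed.
 */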
static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
			       unsigned nr_args)
{
	struct io_uring_files_update up;
	__s32 __user *fds;
	int fd, i, err;
	__u32 done;

	if (!ctx->file_table)
		return -ENXIO;
	if (!nr_args)
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (check_add_overflow(up.offset, nr_args, &done))
		return -EOVERFLOW;
	if (done > ctx->nr_user_files)
		return -EINVAL;

	done = 0;
	fds = (__s32 __user *) up.fds;
	while (nr_args) {
		struct fixed_file_table *table;
		unsigned index;

		err = 0;
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		i = array_index_nospec(up.offset, ctx->nr_user_files);
		table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
		index = i & IORING_FILE_TABLE_MASK;
		if (table->files[index]) {
			io_sqe_file_unregister(ctx, i);
			table->files[index] = NULL;
		}
		if (fd != -1) {
			struct file *file;

			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (file->f_op == &io_uring_fops) {
				fput(file);
				err = -EBADF;
				break;
			}
			table->files[index] = file;
			err = io_sqe_file_register(ctx, file, i);
			if (err)
				break;
		}
		nr_args--;
		done++;
		up.offset++;
	}

	return done ? done : err;
}

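/*
 * io-wq get/put callbacks: a request must stay alive while a worker owns
 * it, so hand out a reference when work is grabbed and drop it on release.
 */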
static void io_put_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	io_put_req(req);
}

static void io_get_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	refcount_inc(&req->refs);
}

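/*
 * Start the async offload machinery: pin the submitter's mm, spin up the
 * SQPOLL thread if requested (optionally bound to one CPU), and create the
 * io-wq worker pool used for punted requests.
 */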
static int io_sq_offload_start(struct io_ring_ctx *ctx,
			       struct io_uring_params *p)
{
	unsigned concurrency;
	int ret;

	init_waitqueue_head(&ctx->sqo_wait);
	mmgrab(current->mm);
	ctx->sqo_mm = current->mm;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto err;

		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids)
				goto err;
			if (!cpu_online(cpu))
				goto err;

			ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
							ctx, cpu,
							"io_uring-sq");
		} else {
			ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
							"io_uring-sq");
		}
		if (IS_ERR(ctx->sqo_thread)) {
			ret = PTR_ERR(ctx->sqo_thread);
			ctx->sqo_thread = NULL;
			goto err;
		}
		wake_up_process(ctx->sqo_thread);
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	/* Do QD, or 4 * CPUS, whatever is smallest */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
	ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, ctx->user,
					io_get_work, io_put_work);
	if (IS_ERR(ctx->io_wq)) {
		ret = PTR_ERR(ctx->io_wq);
		ctx->io_wq = NULL;
		goto err;
	}

	return 0;
err:
	io_finish_async(ctx);
	mmdrop(ctx->sqo_mm);
	ctx->sqo_mm = NULL;
	return ret;
}

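/*
 * Ring and buffer memory is charged against RLIMIT_MEMLOCK for unprivileged
 * users; the cmpxchg loop below allows concurrent accounting without ever
 * exceeding the limit.
 */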
static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	do {
		cur_pages = atomic_long_read(&user->locked_vm);
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
					new_pages) != cur_pages);

	return 0;
}

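/*
 * Rings are allocated as compound pages so that they can be handed to
 * userspace via mmap in one piece; freeing drops the head page reference.
 */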
static void io_mem_free(void *ptr)
{
	struct page *page;

	if (!ptr)
		return;

	page = virt_to_head_page(ptr);
	if (put_page_testzero(page))
		free_compound_page(page);
}

static void *io_mem_alloc(size_t size)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
				__GFP_NORETRY;

	return (void *) __get_free_pages(gfp_flags, get_order(size));
}

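/*
 * Size the shared rings area: the io_rings struct including the CQE array,
 * aligned to a cache line on SMP, followed by the SQ index array whose
 * offset is returned through *sq_offset.
 */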
static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
				size_t *sq_offset)
{
	struct io_rings *rings;
	size_t off, sq_array_size;

	off = struct_size(rings, cqes, cq_entries);
	if (off == SIZE_MAX)
		return SIZE_MAX;

#ifdef CONFIG_SMP
	off = ALIGN(off, SMP_CACHE_BYTES);
	if (off == 0)
		return SIZE_MAX;
#endif

	sq_array_size = array_size(sizeof(u32), sq_entries);
	if (sq_array_size == SIZE_MAX)
		return SIZE_MAX;

	if (check_add_overflow(off, sq_array_size, &off))
		return SIZE_MAX;

	if (sq_offset)
		*sq_offset = off;

	return off;
}

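/* Pages to charge for the SQ/CQ rings plus the SQE array. */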
static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
{
	size_t pages;

	pages = (size_t)1 << get_order(
		rings_size(sq_entries, cq_entries, NULL));
	pages += (size_t)1 << get_order(
		array_size(sizeof(struct io_uring_sqe), sq_entries));

	return pages;
}

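/*
 * Drop all registered buffers: unpin the pages taken at registration time,
 * return the accounted memory, and free the bvec arrays.
 */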
static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
{
	int i, j;

	if (!ctx->user_bufs)
		return -ENXIO;

	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++)
			put_user_page(imu->bvec[j].bv_page);

		if (ctx->account_mem)
			io_unaccount_mem(ctx->user, imu->nr_bvecs);
		kvfree(imu->bvec);
		imu->nr_bvecs = 0;
	}

	kfree(ctx->user_bufs);
	ctx->user_bufs = NULL;
	ctx->nr_user_bufs = 0;
	return 0;
}

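/*
 * Fetch the index'th iovec from userspace, converting from the compat
 * layout when the ring was set up by a 32-bit task.
 */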
static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

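/*
 * Register fixed buffers: pin each iovec's pages with FOLL_LONGTERM,
 * reject file-backed mappings (hugetlbfs excepted), and build a bvec array
 * so the fixed read/write path can skip per-IO get_user_pages() calls.
 */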
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
				  unsigned nr_args)
{
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, j, got_pages = 0;
	int ret = -EINVAL;

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > UIO_MAXIOV)
		return -EINVAL;

	ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
					GFP_KERNEL);
	if (!ctx->user_bufs)
		return -ENOMEM;

	for (i = 0; i < nr_args; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
		unsigned long off, start, end, ubuf;
		int pret, nr_pages;
		struct iovec iov;
		size_t size;

		ret = io_copy_iov(ctx, &iov, arg, i);
		if (ret)
			goto err;

		/*
		 * Don't impose further limits on the size and buffer
		 * constraints here, we'll -EINVAL later when IO is
		 * submitted if they are wrong.
		 */
		ret = -EFAULT;
		if (!iov.iov_base || !iov.iov_len)
			goto err;

		/* arbitrary limit, but we need something */
		if (iov.iov_len > SZ_1G)
			goto err;

		ubuf = (unsigned long) iov.iov_base;
		end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		start = ubuf >> PAGE_SHIFT;
		nr_pages = end - start;

		if (ctx->account_mem) {
			ret = io_account_mem(ctx->user, nr_pages);
			if (ret)
				goto err;
		}

		ret = 0;
		if (!pages || nr_pages > got_pages) {
			/* both arrays come from kvmalloc_array() below, so
			 * they must be released with kvfree(), not kfree() */
			kvfree(vmas);
			kvfree(pages);
			pages = kvmalloc_array(nr_pages, sizeof(struct page *),
						GFP_KERNEL);
			vmas = kvmalloc_array(nr_pages,
					sizeof(struct vm_area_struct *),
					GFP_KERNEL);
			if (!pages || !vmas) {
				ret = -ENOMEM;
				if (ctx->account_mem)
					io_unaccount_mem(ctx->user, nr_pages);
				goto err;
			}
			got_pages = nr_pages;
		}

		imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
						GFP_KERNEL);
		ret = -ENOMEM;
		if (!imu->bvec) {
			if (ctx->account_mem)
				io_unaccount_mem(ctx->user, nr_pages);
			goto err;
		}

		ret = 0;
		down_read(&current->mm->mmap_sem);
		pret = get_user_pages(ubuf, nr_pages,
					FOLL_WRITE | FOLL_LONGTERM,
					pages, vmas);
		if (pret == nr_pages) {
			/* don't support file backed memory */
			for (j = 0; j < nr_pages; j++) {
				struct vm_area_struct *vma = vmas[j];

				if (vma->vm_file &&
				    !is_file_hugepages(vma->vm_file)) {
					ret = -EOPNOTSUPP;
					break;
				}
			}
		} else {
			ret = pret < 0 ? pret : -EFAULT;
		}
		up_read(&current->mm->mmap_sem);
		if (ret) {
			/*
			 * if we did partial map, or found file backed vmas,
			 * release any pages we did get
			 */
			if (pret > 0)
				put_user_pages(pages, pret);
			if (ctx->account_mem)
				io_unaccount_mem(ctx->user, nr_pages);
			kvfree(imu->bvec);
			goto err;
		}

		off = ubuf & ~PAGE_MASK;
		size = iov.iov_len;
		for (j = 0; j < nr_pages; j++) {
			size_t vec_len;

			vec_len = min_t(size_t, size, PAGE_SIZE - off);
			imu->bvec[j].bv_page = pages[j];
			imu->bvec[j].bv_len = vec_len;
			imu->bvec[j].bv_offset = off;
			off = 0;
			size -= vec_len;
		}
		/* store original address for later verification */
		imu->ubuf = ubuf;
		imu->len = iov.iov_len;
		imu->nr_bvecs = nr_pages;

		ctx->nr_user_bufs++;
	}
	kvfree(pages);
	kvfree(vmas);
	return 0;
err:
	kvfree(pages);
	kvfree(vmas);
	io_sqe_buffer_unregister(ctx);
	return ret;
}

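/* Register/unregister an eventfd that is signalled on CQ ring completions. */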
static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
{
	__s32 __user *fds = arg;
	int fd;

	if (ctx->cq_ev_fd)
		return -EBUSY;

	if (copy_from_user(&fd, fds, sizeof(*fds)))
		return -EFAULT;

	ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
	if (IS_ERR(ctx->cq_ev_fd)) {
		int ret = PTR_ERR(ctx->cq_ev_fd);

		ctx->cq_ev_fd = NULL;
		return ret;
	}

	return 0;
}

static int io_eventfd_unregister(struct io_ring_ctx *ctx)
{
	if (ctx->cq_ev_fd) {
		eventfd_ctx_put(ctx->cq_ev_fd);
		ctx->cq_ev_fd = NULL;
		return 0;
	}

	return -ENXIO;
}

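/*
 * Final teardown of a ring: stop async contexts, release every registered
 * resource, free the rings and SQE array, and return accounted memory.
 */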
static void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
	io_finish_async(ctx);
	if (ctx->sqo_mm)
		mmdrop(ctx->sqo_mm);

	io_iopoll_reap_events(ctx);
	io_sqe_buffer_unregister(ctx);
	io_sqe_files_unregister(ctx);
	io_eventfd_unregister(ctx);

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		ctx->ring_sock->file = NULL; /* so that iput() is called */
		sock_release(ctx->ring_sock);
	}
#endif

	io_mem_free(ctx->rings);
	io_mem_free(ctx->sq_sqes);

	percpu_ref_exit(&ctx->refs);
	if (ctx->account_mem)
		io_unaccount_mem(ctx->user,
				ring_pages(ctx->sq_entries, ctx->cq_entries));
	free_uid(ctx->user);
	kfree(ctx->completions);
	kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx);
}

static __poll_t io_uring_poll(struct file *file, poll_table *wait)
{
	struct io_ring_ctx *ctx = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &ctx->cq_wait, wait);
	/*
	 * synchronizes with barrier from wq_has_sleeper call in
	 * io_commit_cqring
	 */
	smp_rmb();
	if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
	    ctx->rings->sq_ring_entries)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

static int io_uring_fasync(int fd, struct file *file, int on)
{
	struct io_ring_ctx *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->cq_fasync);
}

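/*
 * Kill the ctx reference and cancel everything still in flight, then wait
 * for the remaining references to drain before freeing the ring.
 */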
static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
	percpu_ref_kill(&ctx->refs);
	mutex_unlock(&ctx->uring_lock);

	io_kill_timeouts(ctx);
	io_poll_remove_all(ctx);

	if (ctx->io_wq)
		io_wq_cancel_all(ctx->io_wq);

	io_iopoll_reap_events(ctx);
	/* if we failed setting up the ctx, we might not have any rings */
	if (ctx->rings)
		io_cqring_overflow_flush(ctx, true);
	wait_for_completion(&ctx->completions[0]);
	io_ring_ctx_free(ctx);
}

static int io_uring_release(struct inode *inode, struct file *file)
{
	struct io_ring_ctx *ctx = file->private_data;

	file->private_data = NULL;
	io_ring_ctx_wait_and_kill(ctx);
	return 0;
}

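/*
 * Cancel inflight requests that were submitted with the given files_struct;
 * used on flush so a task's requests don't outlive its file table. Keep
 * looping until no matching request remains.
 */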
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
				  struct files_struct *files)
{
	struct io_kiocb *req;
	DEFINE_WAIT(wait);

	while (!list_empty_careful(&ctx->inflight_list)) {
		struct io_kiocb *cancel_req = NULL;

		spin_lock_irq(&ctx->inflight_lock);
		list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
			if (req->work.files != files)
				continue;
			/* req is being completed, ignore */
			if (!refcount_inc_not_zero(&req->refs))
				continue;
			cancel_req = req;
			break;
		}
		if (cancel_req)
			prepare_to_wait(&ctx->inflight_wait, &wait,
						TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ctx->inflight_lock);

		/* We need to keep going until we don't find a matching req */
		if (!cancel_req)
			break;

		io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
		io_put_req(cancel_req);
		schedule();
	}
	finish_wait(&ctx->inflight_wait, &wait);
}

static int io_uring_flush(struct file *file, void *data)
{
	struct io_ring_ctx *ctx = file->private_data;

	io_uring_cancel_files(ctx, data);
	if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
		io_cqring_overflow_flush(ctx, true);
		io_wq_cancel_all(ctx->io_wq);
	}
	return 0;
}

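/*
 * Map the SQ/CQ rings or the SQE array into the application's address
 * space, selected by the magic mmap offset.
 */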
static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
	unsigned long sz = vma->vm_end - vma->vm_start;
	struct io_ring_ctx *ctx = file->private_data;
	unsigned long pfn;
	struct page *page;
	void *ptr;

	switch (offset) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		ptr = ctx->rings;
		break;
	case IORING_OFF_SQES:
		ptr = ctx->sq_sqes;
		break;
	default:
		return -EINVAL;
	}

	page = virt_to_head_page(ptr);
	if (sz > page_size(page))
		return -EINVAL;

	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}

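/*
 * io_uring_enter(2): submit sqes and/or wait for completions. With SQPOLL
 * the kernel thread owns submission, so we only flush CQ overflow and wake
 * the thread when asked.
 */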
SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
		u32, min_complete, u32, flags, const sigset_t __user *, sig,
		size_t, sigsz)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	int submitted = 0;
	struct fd f;

	if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
		return -EINVAL;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ret = -ENXIO;
	ctx = f.file->private_data;
	if (!percpu_ref_tryget(&ctx->refs))
		goto out_fput;

	/*
	 * For SQ polling, the thread will do all submissions and completions.
	 * Just return the requested submit count, and wake the thread if
	 * we were asked to.
	 */
	ret = 0;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (!list_empty_careful(&ctx->cq_overflow_list))
			io_cqring_overflow_flush(ctx, false);
		if (flags & IORING_ENTER_SQ_WAKEUP)
			wake_up(&ctx->sqo_wait);
		submitted = to_submit;
	} else if (to_submit) {
		struct mm_struct *cur_mm;

		to_submit = min(to_submit, ctx->sq_entries);
		mutex_lock(&ctx->uring_lock);
		/* already have mm, so io_submit_sqes() won't try to grab it */
		cur_mm = ctx->sqo_mm;
		submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
						&cur_mm, false);
		mutex_unlock(&ctx->uring_lock);
	}
	if (flags & IORING_ENTER_GETEVENTS) {
		unsigned nr_events = 0;

		min_complete = min(min_complete, ctx->cq_entries);

		if (ctx->flags & IORING_SETUP_IOPOLL)
			ret = io_iopoll_check(ctx, &nr_events, min_complete);
		else
			ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
	}

	percpu_ref_put(&ctx->refs);
out_fput:
	fdput(f);
	return submitted ? submitted : ret;
}

static const struct file_operations io_uring_fops = {
	.release	= io_uring_release,
	.flush		= io_uring_flush,
	.mmap		= io_uring_mmap,
	.poll		= io_uring_poll,
	.fasync		= io_uring_fasync,
};

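/*
 * Allocate the rings and SQE array for a new ctx and publish the sizes and
 * masks in the shared io_rings structure.
 */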
static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
				  struct io_uring_params *p)
{
	struct io_rings *rings;
	size_t size, sq_array_offset;

	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	rings = io_mem_alloc(size);
	if (!rings)
		return -ENOMEM;

	ctx->rings = rings;
	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
	rings->sq_ring_mask = p->sq_entries - 1;
	rings->cq_ring_mask = p->cq_entries - 1;
	rings->sq_ring_entries = p->sq_entries;
	rings->cq_ring_entries = p->cq_entries;
	ctx->sq_mask = rings->sq_ring_mask;
	ctx->cq_mask = rings->cq_ring_mask;
	ctx->sq_entries = rings->sq_ring_entries;
	ctx->cq_entries = rings->cq_ring_entries;

	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	ctx->sq_sqes = io_mem_alloc(size);
	if (!ctx->sq_sqes)
		return -ENOMEM;

	return 0;
}

/*
 * Allocate an anonymous fd; this is what constitutes the application-visible
 * backing of an io_uring instance. The application mmaps this fd to gain
 * access to the SQ/CQ ring details. If UNIX sockets are enabled, we have to
 * tie this fd to a socket for file garbage collection purposes.
 */
static int io_uring_get_fd(struct io_ring_ctx *ctx)
{
	struct file *file;
	int ret;

#if defined(CONFIG_UNIX)
	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
				&ctx->ring_sock);
	if (ret)
		return ret;
#endif

	ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (ret < 0)
		goto err;

	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
					O_RDWR | O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
		goto err;
	}

#if defined(CONFIG_UNIX)
	ctx->ring_sock->file = file;
	ctx->ring_sock->sk->sk_user_data = ctx;
#endif
	fd_install(ret, file);
	return ret;
err:
#if defined(CONFIG_UNIX)
	sock_release(ctx->ring_sock);
	ctx->ring_sock = NULL;
#endif
	return ret;
}

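/*
 * Core of ring setup: size and allocate the rings, start SQPOLL/io-wq as
 * requested, fill in the mmap offsets for the application, and install the
 * ring fd as the final step.
 */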
static int io_uring_create(unsigned entries, struct io_uring_params *p)
{
	struct user_struct *user = NULL;
	struct io_ring_ctx *ctx;
	bool account_mem;
	int ret;

	if (!entries || entries > IORING_MAX_ENTRIES)
		return -EINVAL;

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. We do NOT impose
		 * any cq vs sq ring sizing.
		 */
		if (p->cq_entries < p->sq_entries ||
		    p->cq_entries > IORING_MAX_CQ_ENTRIES)
			return -EINVAL;
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}

	user = get_uid(current_user());
	account_mem = !capable(CAP_IPC_LOCK);

	if (account_mem) {
		ret = io_account_mem(user,
				ring_pages(p->sq_entries, p->cq_entries));
		if (ret) {
			free_uid(user);
			return ret;
		}
	}

	ctx = io_ring_ctx_alloc(p);
	if (!ctx) {
		if (account_mem)
			io_unaccount_mem(user, ring_pages(p->sq_entries,
								p->cq_entries));
		free_uid(user);
		return -ENOMEM;
	}
	ctx->compat = in_compat_syscall();
	ctx->account_mem = account_mem;
	ctx->user = user;

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_start(ctx, p);
	if (ret)
		goto err;

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	ret = io_uring_get_fd(ctx);
	if (ret < 0)
		goto err;

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP;
	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}

/*
 * Sets up an io_uring context, and returns the fd. The application asks
 * for a ring size; we return the actual sq/cq ring sizes (among other
 * things) in the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	long ret;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE))
		return -EINVAL;

	ret = io_uring_create(entries, &p);
	if (ret < 0)
		return ret;

	if (copy_to_user(params, &p, sizeof(p)))
		return -EFAULT;

	return ret;
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}

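/*
 * Execute a registration opcode with the ring fully quiesced: kill the
 * percpu ref, wait for inflight requests to drain, run the operation, then
 * revive the ref.
 */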
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex; if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	percpu_ref_kill(&ctx->refs);

	/*
	 * Drop uring mutex before waiting for references to exit. If another
	 * thread is currently inside io_uring_enter() it might need to grab
	 * the uring_lock to make progress. If we hold it here across the drain
	 * wait, then we can deadlock. It's safe to drop the mutex here, since
	 * no new references will come in after we've killed the percpu ref.
	 */
	mutex_unlock(&ctx->uring_lock);
	wait_for_completion(&ctx->completions[0]);
	mutex_lock(&ctx->uring_lock);

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffer_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffer_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_sqe_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	/* bring the ctx back to life */
	reinit_completion(&ctx->completions[0]);
	percpu_ref_reinit(&ctx->refs);
	return ret;
}

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files,
				ctx->nr_user_bufs, ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}

static int __init io_uring_init(void)
{
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
	return 0;
}
__initcall(io_uring_init);