// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *        git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
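
/*
 * Purely illustrative sketch, not part of this file's logic: what the
 * barrier pairing above might look like from the application side when
 * driving the raw mmap'd rings directly. The names sq_tail, sq_array,
 * sq_mask, sq_flags and sqe_index are hypothetical userspace views of
 * the shared memory, not identifiers defined here.
 *
 *        unsigned tail = *sq_tail;
 *        sq_array[tail & *sq_mask] = sqe_index;
 *        // the release store orders the SQE/array stores before the
 *        // tail store, pairing with the kernel's acquire load
 *        smp_store_release(sq_tail, tail + 1);
 *        // with IORING_SETUP_SQPOLL: full barrier before checking flags
 *        smp_mb();
 *        if (READ_ONCE(*sq_flags) & IORING_SQ_NEED_WAKEUP)
 *                syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *                        IORING_ENTER_SQ_WAKEUP, NULL, 0);
 */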
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES 32768
#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT 9
#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)

struct io_uring {
        u32 head ____cacheline_aligned_in_smp;
        u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
        /*
         * Head and tail offsets into the ring; the offsets need to be
         * masked to get valid indices.
         *
         * The kernel controls head of the sq ring and the tail of the cq ring,
         * and the application controls tail of the sq ring and the head of the
         * cq ring.
         */
        struct io_uring sq, cq;
        /*
         * Bitmasks to apply to head and tail offsets (constant, equals
         * ring_entries - 1)
         */
        u32 sq_ring_mask, cq_ring_mask;
        /* Ring sizes (constant, power of 2) */
        u32 sq_ring_entries, cq_ring_entries;
        /*
         * Number of invalid entries dropped by the kernel due to
         * invalid index stored in array
         *
         * Written by the kernel, shouldn't be modified by the
         * application (i.e. get number of "new events" by comparing to
         * cached value).
         *
         * After a new SQ head value was read by the application this
         * counter includes all submissions that were dropped reaching
         * the new SQ head (and possibly more).
         */
        u32 sq_dropped;
        /*
         * Runtime flags
         *
         * Written by the kernel, shouldn't be modified by the
         * application.
         *
         * The application needs a full memory barrier before checking
         * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
         */
        u32 sq_flags;
        /*
         * Number of completion events lost because the queue was full;
         * this should be avoided by the application by making sure
         * there are not more requests pending than there is space in
         * the completion queue.
         *
         * Written by the kernel, shouldn't be modified by the
         * application (i.e. get number of "new events" by comparing to
         * cached value).
         *
         * As completion events come in out of order this counter is not
         * ordered with any other data.
         */
        u32 cq_overflow;
        /*
         * Ring buffer of completion events.
         *
         * The kernel writes completion events fresh every time they are
         * produced, so the application is allowed to modify pending
         * entries.
         */
        struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};

struct io_mapped_ubuf {
        u64 ubuf;
        size_t len;
        struct bio_vec *bvec;
        unsigned int nr_bvecs;
};

struct fixed_file_table {
        struct file **files;
};

enum {
        FFD_F_ATOMIC,
};

struct fixed_file_data {
        struct fixed_file_table *table;
        struct io_ring_ctx *ctx;

        struct percpu_ref refs;
        struct llist_head put_llist;
        unsigned long state;
        struct work_struct ref_work;
        struct completion done;
};

struct io_ring_ctx {
        struct {
                struct percpu_ref refs;
        } ____cacheline_aligned_in_smp;

        struct {
                unsigned int flags;
                int compat: 1;
                int account_mem: 1;
                int cq_overflow_flushed: 1;
                int drain_next: 1;
                int eventfd_async: 1;

                /*
                 * Ring buffer of indices into array of io_uring_sqe, which is
                 * mmapped by the application using the IORING_OFF_SQES offset.
                 *
                 * This indirection could e.g. be used to assign fixed
                 * io_uring_sqe entries to operations and only submit them to
                 * the queue when needed.
                 *
                 * The kernel modifies neither the indices array nor the entries
                 * array.
                 */
                u32 *sq_array;
                unsigned cached_sq_head;
                unsigned sq_entries;
                unsigned sq_mask;
                unsigned sq_thread_idle;
                unsigned cached_sq_dropped;
                atomic_t cached_cq_overflow;
                unsigned long sq_check_overflow;

                struct list_head defer_list;
                struct list_head timeout_list;
                struct list_head cq_overflow_list;

                wait_queue_head_t inflight_wait;
                struct io_uring_sqe *sq_sqes;
        } ____cacheline_aligned_in_smp;

        struct io_rings *rings;

        /* IO offload */
        struct io_wq *io_wq;
        struct task_struct *sqo_thread;        /* if using sq thread polling */
        struct mm_struct *sqo_mm;
        wait_queue_head_t sqo_wait;

        /*
         * If used, fixed file set. Writers must ensure that ->refs is dead,
         * readers must ensure that ->refs is alive as long as the file* is
         * used. Only updated through io_uring_register(2).
         */
        struct fixed_file_data *file_data;
        unsigned nr_user_files;
        int ring_fd;
        struct file *ring_file;

        /* if used, fixed mapped user buffers */
        unsigned nr_user_bufs;
        struct io_mapped_ubuf *user_bufs;

        struct user_struct *user;

        const struct cred *creds;

        /* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */
        struct completion *completions;

        /* if all else fails... */
        struct io_kiocb *fallback_req;

#if defined(CONFIG_UNIX)
        struct socket *ring_sock;
#endif

        struct idr personality_idr;

        struct {
                unsigned cached_cq_tail;
                unsigned cq_entries;
                unsigned cq_mask;
                atomic_t cq_timeouts;
                unsigned long cq_check_overflow;
                struct wait_queue_head cq_wait;
                struct fasync_struct *cq_fasync;
                struct eventfd_ctx *cq_ev_fd;
        } ____cacheline_aligned_in_smp;

        struct {
                struct mutex uring_lock;
                wait_queue_head_t wait;
        } ____cacheline_aligned_in_smp;

        struct {
                spinlock_t completion_lock;
                struct llist_head poll_llist;

                /*
                 * ->poll_list is protected by the ctx->uring_lock for
                 * io_uring instances that don't use IORING_SETUP_SQPOLL.
                 * For SQPOLL, only the single threaded io_sq_thread() will
                 * manipulate the list, hence no extra locking is needed there.
                 */
                struct list_head poll_list;
                struct hlist_head *cancel_hash;
                unsigned cancel_hash_bits;
                bool poll_multi_file;

                spinlock_t inflight_lock;
                struct list_head inflight_list;
        } ____cacheline_aligned_in_smp;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
        struct file *file;
        union {
                struct wait_queue_head *head;
                u64 addr;
        };
        __poll_t events;
        bool done;
        bool canceled;
        struct wait_queue_entry wait;
};

struct io_close {
        struct file *file;
        struct file *put_file;
        int fd;
};

struct io_timeout_data {
        struct io_kiocb *req;
        struct hrtimer timer;
        struct timespec64 ts;
        enum hrtimer_mode mode;
        u32 seq_offset;
};

struct io_accept {
        struct file *file;
        struct sockaddr __user *addr;
        int __user *addr_len;
        int flags;
};

struct io_sync {
        struct file *file;
        loff_t len;
        loff_t off;
        int flags;
        int mode;
};

struct io_cancel {
        struct file *file;
        u64 addr;
};

struct io_timeout {
        struct file *file;
        u64 addr;
        int flags;
        unsigned count;
};

struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb kiocb;
        u64 addr;
        u64 len;
};

struct io_connect {
        struct file *file;
        struct sockaddr __user *addr;
        int addr_len;
};

struct io_sr_msg {
        struct file *file;
        union {
                struct user_msghdr __user *msg;
                void __user *buf;
        };
        int msg_flags;
        size_t len;
};

struct io_open {
        struct file *file;
        int dfd;
        union {
                unsigned mask;
        };
        struct filename *filename;
        struct statx __user *buffer;
        struct open_how how;
};

struct io_files_update {
        struct file *file;
        u64 arg;
        u32 nr_args;
        u32 offset;
};

struct io_fadvise {
        struct file *file;
        u64 offset;
        u32 len;
        u32 advice;
};

struct io_madvise {
        struct file *file;
        u64 addr;
        u32 len;
        u32 advice;
};

struct io_epoll {
        struct file *file;
        int epfd;
        int op;
        int fd;
        struct epoll_event event;
};

struct io_async_connect {
        struct sockaddr_storage address;
};

struct io_async_msghdr {
        struct iovec fast_iov[UIO_FASTIOV];
        struct iovec *iov;
        struct sockaddr __user *uaddr;
        struct msghdr msg;
};

struct io_async_rw {
        struct iovec fast_iov[UIO_FASTIOV];
        struct iovec *iov;
        ssize_t nr_segs;
        ssize_t size;
};

struct io_async_open {
        struct filename *filename;
};

struct io_async_ctx {
        union {
                struct io_async_rw rw;
                struct io_async_msghdr msg;
                struct io_async_connect connect;
                struct io_timeout_data timeout;
                struct io_async_open open;
        };
};

enum {
        REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
        REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
        REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
        REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
        REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,

        REQ_F_LINK_NEXT_BIT,
        REQ_F_FAIL_LINK_BIT,
        REQ_F_INFLIGHT_BIT,
        REQ_F_CUR_POS_BIT,
        REQ_F_NOWAIT_BIT,
        REQ_F_IOPOLL_COMPLETED_BIT,
        REQ_F_LINK_TIMEOUT_BIT,
        REQ_F_TIMEOUT_BIT,
        REQ_F_ISREG_BIT,
        REQ_F_MUST_PUNT_BIT,
        REQ_F_TIMEOUT_NOSEQ_BIT,
        REQ_F_COMP_LOCKED_BIT,
};

enum {
        /* ctx owns file */
        REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
        /* drain existing IO first */
        REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
        /* linked sqes */
        REQ_F_LINK = BIT(REQ_F_LINK_BIT),
        /* doesn't sever on completion < 0 */
        REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
        /* IOSQE_ASYNC */
        REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),

        /* already grabbed next link */
        REQ_F_LINK_NEXT = BIT(REQ_F_LINK_NEXT_BIT),
        /* fail rest of links */
        REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
        /* on inflight list */
        REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
        /* read/write uses file position */
        REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
        /* must not punt to workers */
        REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
        /* polled IO has completed */
        REQ_F_IOPOLL_COMPLETED = BIT(REQ_F_IOPOLL_COMPLETED_BIT),
        /* has linked timeout */
        REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
        /* timeout request */
        REQ_F_TIMEOUT = BIT(REQ_F_TIMEOUT_BIT),
        /* regular file */
        REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
        /* must be punted even for NONBLOCK */
        REQ_F_MUST_PUNT = BIT(REQ_F_MUST_PUNT_BIT),
        /* no timeout sequence */
        REQ_F_TIMEOUT_NOSEQ = BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
        /* completion under lock */
        REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT),
};
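
/*
 * Note on the two enums above: the first five REQ_F_* bits are defined in
 * terms of their IOSQE_* counterparts, so numerically REQ_F_FIXED_FILE ==
 * IOSQE_FIXED_FILE, REQ_F_IO_DRAIN == IOSQE_IO_DRAIN, and so on. An
 * assertion such as
 *
 *        BUILD_BUG_ON(REQ_F_LINK != IOSQE_IO_LINK);
 *
 * would hold, which is what allows the low sqe->flags bits to be carried
 * into req->flags without translation.
 */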

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
        union {
                struct file *file;
                struct io_rw rw;
                struct io_poll_iocb poll;
                struct io_accept accept;
                struct io_sync sync;
                struct io_cancel cancel;
                struct io_timeout timeout;
                struct io_connect connect;
                struct io_sr_msg sr_msg;
                struct io_open open;
                struct io_close close;
                struct io_files_update files_update;
                struct io_fadvise fadvise;
                struct io_madvise madvise;
                struct io_epoll epoll;
        };

        struct io_async_ctx *io;
        /*
         * llist_node is only used for poll deferred completions
         */
        struct llist_node llist_node;
        bool has_user;
        bool in_async;
        bool needs_fixed_file;
        u8 opcode;

        struct io_ring_ctx *ctx;
        union {
                struct list_head list;
                struct hlist_node hash_node;
        };
        struct list_head link_list;
        unsigned int flags;
        refcount_t refs;
        u64 user_data;
        u32 result;
        u32 sequence;

        struct list_head inflight_entry;

        struct io_wq_work work;
};

#define IO_PLUG_THRESHOLD 2
#define IO_IOPOLL_BATCH 8

struct io_submit_state {
        struct blk_plug plug;

        /*
         * io_kiocb alloc cache
         */
        void *reqs[IO_IOPOLL_BATCH];
        unsigned int free_reqs;

        /*
         * File reference cache
         */
        struct file *file;
        unsigned int fd;
        unsigned int has_refs;
        unsigned int used_refs;
        unsigned int ios_left;
};

struct io_op_def {
        /* needs req->io allocated for deferral/async */
        unsigned async_ctx : 1;
        /* needs current->mm setup, does mm access */
        unsigned needs_mm : 1;
        /* needs req->file assigned */
        unsigned needs_file : 1;
        /* needs req->file assigned IFF fd is >= 0 */
        unsigned fd_non_neg : 1;
        /* hash wq insertion if file is a regular file */
        unsigned hash_reg_file : 1;
        /* unbound wq insertion if file is a non-regular file */
        unsigned unbound_nonreg_file : 1;
        /* opcode is not supported by this kernel */
        unsigned not_supported : 1;
        /* needs file table */
        unsigned file_table : 1;
};

static const struct io_op_def io_op_defs[] = {
        [IORING_OP_NOP] = {},
        [IORING_OP_READV] = {
                .async_ctx = 1,
                .needs_mm = 1,
                .needs_file = 1,
                .unbound_nonreg_file = 1,
        },
        [IORING_OP_WRITEV] = {
                .async_ctx = 1,
                .needs_mm = 1,
                .needs_file = 1,
                .hash_reg_file = 1,
                .unbound_nonreg_file = 1,
        },
        [IORING_OP_FSYNC] = {
                .needs_file = 1,
        },
        [IORING_OP_READ_FIXED] = {
                .needs_file = 1,
                .unbound_nonreg_file = 1,
        },
        [IORING_OP_WRITE_FIXED] = {
                .needs_file = 1,
                .hash_reg_file = 1,
                .unbound_nonreg_file = 1,
        },
        [IORING_OP_POLL_ADD] = {
                .needs_file = 1,
                .unbound_nonreg_file = 1,
        },
        [IORING_OP_POLL_REMOVE] = {},
        [IORING_OP_SYNC_FILE_RANGE] = {
                .needs_file = 1,
        },
        [IORING_OP_SENDMSG] = {
                .async_ctx = 1,
                .needs_mm = 1,
                .needs_file = 1,
                .unbound_nonreg_file = 1,
        },
        [IORING_OP_RECVMSG] = {
                .async_ctx = 1,
                .needs_mm = 1,
                .needs_file = 1,
                .unbound_nonreg_file = 1,
        },
        [IORING_OP_TIMEOUT] = {
                .async_ctx = 1,
                .needs_mm = 1,
        },
        [IORING_OP_TIMEOUT_REMOVE] = {},
        [IORING_OP_ACCEPT] = {
                .needs_mm = 1,
                .needs_file = 1,
                .unbound_nonreg_file = 1,
                .file_table = 1,
        },
        [IORING_OP_ASYNC_CANCEL] = {},
        [IORING_OP_LINK_TIMEOUT] = {
                .async_ctx = 1,
                .needs_mm = 1,
        },
        [IORING_OP_CONNECT] = {
                .async_ctx = 1,
                .needs_mm = 1,
                .needs_file = 1,
                .unbound_nonreg_file = 1,
        },
        [IORING_OP_FALLOCATE] = {
                .needs_file = 1,
        },
        [IORING_OP_OPENAT] = {
                .needs_file = 1,
                .fd_non_neg = 1,
                .file_table = 1,
        },
        [IORING_OP_CLOSE] = {
                .needs_file = 1,
                .file_table = 1,
        },
        [IORING_OP_FILES_UPDATE] = {
                .needs_mm = 1,
                .file_table = 1,
        },
        [IORING_OP_STATX] = {
                .needs_mm = 1,
                .needs_file = 1,
                .fd_non_neg = 1,
        },
        [IORING_OP_READ] = {
                .needs_mm = 1,
                .needs_file = 1,
                .unbound_nonreg_file = 1,
        },
        [IORING_OP_WRITE] = {
                .needs_mm = 1,
                .needs_file = 1,
                .unbound_nonreg_file = 1,
        },
        [IORING_OP_FADVISE] = {
                .needs_file = 1,
        },
        [IORING_OP_MADVISE] = {
                .needs_mm = 1,
        },
        [IORING_OP_SEND] = {
                .needs_mm = 1,
                .needs_file = 1,
                .unbound_nonreg_file = 1,
        },
        [IORING_OP_RECV] = {
                .needs_mm = 1,
                .needs_file = 1,
                .unbound_nonreg_file = 1,
        },
        [IORING_OP_OPENAT2] = {
                .needs_file = 1,
                .fd_non_neg = 1,
                .file_table = 1,
        },
        [IORING_OP_EPOLL_CTL] = {
                .unbound_nonreg_file = 1,
                .file_table = 1,
        },
};
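
/*
 * Everything per-opcode below is driven by this table; a handler simply
 * indexes it with the request opcode, e.g. as io_prep_async_work() does:
 *
 *        const struct io_op_def *def = &io_op_defs[req->opcode];
 */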

static void io_wq_submit_work(struct io_wq_work **workptr);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                                 struct io_uring_files_update *ip,
                                 unsigned nr_args);
static int io_grab_files(struct io_kiocb *req);
static void io_ring_file_ref_flush(struct fixed_file_data *data);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
        if (file->f_op == &io_uring_fops) {
                struct io_ring_ctx *ctx = file->private_data;

                return ctx->ring_sock->sk;
        }
#endif
        return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
        struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

        complete(&ctx->completions[0]);
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
        struct io_ring_ctx *ctx;
        int hash_bits;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
        if (!ctx->fallback_req)
                goto err;

        ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL);
        if (!ctx->completions)
                goto err;

        /*
         * Use 5 bits less than the max cq entries, that should give us around
         * 32 entries per hash list if totally full and uniformly spread.
         */
        hash_bits = ilog2(p->cq_entries);
        hash_bits -= 5;
        if (hash_bits <= 0)
                hash_bits = 1;
        ctx->cancel_hash_bits = hash_bits;
        ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
                                        GFP_KERNEL);
        if (!ctx->cancel_hash)
                goto err;
        __hash_init(ctx->cancel_hash, 1U << hash_bits);

        if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
                            PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
                goto err;

        ctx->flags = p->flags;
        init_waitqueue_head(&ctx->cq_wait);
        INIT_LIST_HEAD(&ctx->cq_overflow_list);
        init_completion(&ctx->completions[0]);
        init_completion(&ctx->completions[1]);
        idr_init(&ctx->personality_idr);
        mutex_init(&ctx->uring_lock);
        init_waitqueue_head(&ctx->wait);
        spin_lock_init(&ctx->completion_lock);
        init_llist_head(&ctx->poll_llist);
        INIT_LIST_HEAD(&ctx->poll_list);
        INIT_LIST_HEAD(&ctx->defer_list);
        INIT_LIST_HEAD(&ctx->timeout_list);
        init_waitqueue_head(&ctx->inflight_wait);
        spin_lock_init(&ctx->inflight_lock);
        INIT_LIST_HEAD(&ctx->inflight_list);
        return ctx;
err:
        if (ctx->fallback_req)
                kmem_cache_free(req_cachep, ctx->fallback_req);
        kfree(ctx->completions);
        kfree(ctx->cancel_hash);
        kfree(ctx);
        return NULL;
}

static inline bool __req_need_defer(struct io_kiocb *req)
{
        struct io_ring_ctx *ctx = req->ctx;

        return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
                                        + atomic_read(&ctx->cached_cq_overflow);
}
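
/*
 * Worked example for the check above: drained requests record a sequence
 * at submission time. With cached_cq_tail == 12, cached_sq_dropped == 2
 * and cached_cq_overflow == 1, the sum is 15, so a request whose
 * ->sequence is 15 no longer needs deferring, while one stamped 20
 * still does.
 */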

static inline bool req_need_defer(struct io_kiocb *req)
{
        if (unlikely(req->flags & REQ_F_IO_DRAIN))
                return __req_need_defer(req);

        return false;
}

static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
{
        struct io_kiocb *req;

        req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
        if (req && !req_need_defer(req)) {
                list_del_init(&req->list);
                return req;
        }

        return NULL;
}

static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
{
        struct io_kiocb *req;

        req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
        if (req) {
                if (req->flags & REQ_F_TIMEOUT_NOSEQ)
                        return NULL;
                if (!__req_need_defer(req)) {
                        list_del_init(&req->list);
                        return req;
                }
        }

        return NULL;
}

static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
        struct io_rings *rings = ctx->rings;

        /* order cqe stores with ring update */
        smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);

        if (wq_has_sleeper(&ctx->cq_wait)) {
                wake_up_interruptible(&ctx->cq_wait);
                kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
        }
}
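
/*
 * The smp_store_release() above is the kernel half of the CQ-tail pairing
 * described at the top of this file: the cqe stores become visible before
 * the new tail does. The matching application side is an acquire load of
 * the tail, e.g.:
 *
 *        unsigned tail = smp_load_acquire(&rings->cq.tail);
 */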

static inline void io_req_work_grab_env(struct io_kiocb *req,
                                        const struct io_op_def *def)
{
        if (!req->work.mm && def->needs_mm) {
                mmgrab(current->mm);
                req->work.mm = current->mm;
        }
        if (!req->work.creds)
                req->work.creds = get_current_cred();
}

static inline void io_req_work_drop_env(struct io_kiocb *req)
{
        if (req->work.mm) {
                mmdrop(req->work.mm);
                req->work.mm = NULL;
        }
        if (req->work.creds) {
                put_cred(req->work.creds);
                req->work.creds = NULL;
        }
}

static inline bool io_prep_async_work(struct io_kiocb *req,
                                      struct io_kiocb **link)
{
        const struct io_op_def *def = &io_op_defs[req->opcode];
        bool do_hashed = false;

        if (req->flags & REQ_F_ISREG) {
                if (def->hash_reg_file)
                        do_hashed = true;
        } else {
                if (def->unbound_nonreg_file)
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }

        io_req_work_grab_env(req, def);

        *link = io_prep_linked_timeout(req);
        return do_hashed;
}

static inline void io_queue_async_work(struct io_kiocb *req)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_kiocb *link;
        bool do_hashed;

        do_hashed = io_prep_async_work(req, &link);

        trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
                                        req->flags);
        if (!do_hashed) {
                io_wq_enqueue(ctx->io_wq, &req->work);
        } else {
                io_wq_enqueue_hashed(ctx->io_wq, &req->work,
                                        file_inode(req->file));
        }

        if (link)
                io_queue_linked_timeout(link);
}

static void io_kill_timeout(struct io_kiocb *req)
{
        int ret;

        ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
        if (ret != -1) {
                atomic_inc(&req->ctx->cq_timeouts);
                list_del_init(&req->list);
                io_cqring_fill_event(req, 0);
                io_put_req(req);
        }
}

static void io_kill_timeouts(struct io_ring_ctx *ctx)
{
        struct io_kiocb *req, *tmp;

        spin_lock_irq(&ctx->completion_lock);
        list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
                io_kill_timeout(req);
        spin_unlock_irq(&ctx->completion_lock);
}

static void io_commit_cqring(struct io_ring_ctx *ctx)
{
        struct io_kiocb *req;

        while ((req = io_get_timeout_req(ctx)) != NULL)
                io_kill_timeout(req);

        __io_commit_cqring(ctx);

        while ((req = io_get_deferred_req(ctx)) != NULL)
                io_queue_async_work(req);
}

static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
        struct io_rings *rings = ctx->rings;
        unsigned tail;

        tail = ctx->cached_cq_tail;
        /*
         * writes to the cq entry need to come after reading head; the
         * control dependency is enough as we're using WRITE_ONCE to
         * fill the cq entry
         */
        if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
                return NULL;

        ctx->cached_cq_tail++;
        return &rings->cqes[tail & ctx->cq_mask];
}

static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
        if (!ctx->cq_ev_fd)
                return false;
        if (!ctx->eventfd_async)
                return true;
        return io_wq_current_is_worker() || in_interrupt();
}

static void __io_cqring_ev_posted(struct io_ring_ctx *ctx, bool trigger_ev)
{
        if (waitqueue_active(&ctx->wait))
                wake_up(&ctx->wait);
        if (waitqueue_active(&ctx->sqo_wait))
                wake_up(&ctx->sqo_wait);
        if (trigger_ev)
                eventfd_signal(ctx->cq_ev_fd, 1);
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
        __io_cqring_ev_posted(ctx, io_should_trigger_evfd(ctx));
}

/* Returns true if there are no backlogged entries after the flush */
static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
        struct io_rings *rings = ctx->rings;
        struct io_uring_cqe *cqe;
        struct io_kiocb *req;
        unsigned long flags;
        LIST_HEAD(list);

        if (!force) {
                if (list_empty_careful(&ctx->cq_overflow_list))
                        return true;
                if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
                    rings->cq_ring_entries))
                        return false;
        }

        spin_lock_irqsave(&ctx->completion_lock, flags);

        /* if force is set, the ring is going away. always drop after that */
        if (force)
                ctx->cq_overflow_flushed = 1;

        cqe = NULL;
        while (!list_empty(&ctx->cq_overflow_list)) {
                cqe = io_get_cqring(ctx);
                if (!cqe && !force)
                        break;

                req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
                                                list);
                list_move(&req->list, &list);
                if (cqe) {
                        WRITE_ONCE(cqe->user_data, req->user_data);
                        WRITE_ONCE(cqe->res, req->result);
                        WRITE_ONCE(cqe->flags, 0);
                } else {
                        WRITE_ONCE(ctx->rings->cq_overflow,
                                atomic_inc_return(&ctx->cached_cq_overflow));
                }
        }

        io_commit_cqring(ctx);
        if (cqe) {
                clear_bit(0, &ctx->sq_check_overflow);
                clear_bit(0, &ctx->cq_check_overflow);
        }
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
        io_cqring_ev_posted(ctx);

        while (!list_empty(&list)) {
                req = list_first_entry(&list, struct io_kiocb, list);
                list_del(&req->list);
                io_put_req(req);
        }

        return cqe != NULL;
}

static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_uring_cqe *cqe;

        trace_io_uring_complete(ctx, req->user_data, res);

        /*
         * If we can't get a cq entry, userspace overflowed the
         * submission (by quite a lot). Increment the overflow count in
         * the ring.
         */
        cqe = io_get_cqring(ctx);
        if (likely(cqe)) {
                WRITE_ONCE(cqe->user_data, req->user_data);
                WRITE_ONCE(cqe->res, res);
                WRITE_ONCE(cqe->flags, 0);
        } else if (ctx->cq_overflow_flushed) {
                WRITE_ONCE(ctx->rings->cq_overflow,
                                atomic_inc_return(&ctx->cached_cq_overflow));
        } else {
                if (list_empty(&ctx->cq_overflow_list)) {
                        set_bit(0, &ctx->sq_check_overflow);
                        set_bit(0, &ctx->cq_check_overflow);
                }
                refcount_inc(&req->refs);
                req->result = res;
                list_add_tail(&req->list, &ctx->cq_overflow_list);
        }
}

static void io_cqring_add_event(struct io_kiocb *req, long res)
{
        struct io_ring_ctx *ctx = req->ctx;
        unsigned long flags;

        spin_lock_irqsave(&ctx->completion_lock, flags);
        io_cqring_fill_event(req, res);
        io_commit_cqring(ctx);
        spin_unlock_irqrestore(&ctx->completion_lock, flags);

        io_cqring_ev_posted(ctx);
}

static inline bool io_is_fallback_req(struct io_kiocb *req)
{
        return req == (struct io_kiocb *)
                        ((unsigned long) req->ctx->fallback_req & ~1UL);
}

static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
{
        struct io_kiocb *req;

        req = ctx->fallback_req;
        if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
                return req;

        return NULL;
}

static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
                                   struct io_submit_state *state)
{
        gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
        struct io_kiocb *req;

        if (!state) {
                req = kmem_cache_alloc(req_cachep, gfp);
                if (unlikely(!req))
                        goto fallback;
        } else if (!state->free_reqs) {
                size_t sz;
                int ret;

                sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
                ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);

                /*
                 * Bulk alloc is all-or-nothing. If we fail to get a batch,
                 * retry single alloc to be on the safe side.
                 */
                if (unlikely(ret <= 0)) {
                        state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
                        if (!state->reqs[0])
                                goto fallback;
                        ret = 1;
                }
                state->free_reqs = ret - 1;
                req = state->reqs[ret - 1];
        } else {
                state->free_reqs--;
                req = state->reqs[state->free_reqs];
        }

got_it:
        req->io = NULL;
        req->file = NULL;
        req->ctx = ctx;
        req->flags = 0;
        /* one is dropped after submission, the other at completion */
        refcount_set(&req->refs, 2);
        req->result = 0;
        INIT_IO_WORK(&req->work, io_wq_submit_work);
        return req;
fallback:
        req = io_get_fallback_req(ctx);
        if (req)
                goto got_it;
        percpu_ref_put(&ctx->refs);
        return NULL;
}

static void __io_req_do_free(struct io_kiocb *req)
{
        if (likely(!io_is_fallback_req(req)))
                kmem_cache_free(req_cachep, req);
        else
                clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
}

static void __io_req_aux_free(struct io_kiocb *req)
{
        struct io_ring_ctx *ctx = req->ctx;

        kfree(req->io);
        if (req->file) {
                if (req->flags & REQ_F_FIXED_FILE)
                        percpu_ref_put(&ctx->file_data->refs);
                else
                        fput(req->file);
        }

        io_req_work_drop_env(req);
}

static void __io_free_req(struct io_kiocb *req)
{
        __io_req_aux_free(req);

        if (req->flags & REQ_F_INFLIGHT) {
                struct io_ring_ctx *ctx = req->ctx;
                unsigned long flags;

                spin_lock_irqsave(&ctx->inflight_lock, flags);
                list_del(&req->inflight_entry);
                if (waitqueue_active(&ctx->inflight_wait))
                        wake_up(&ctx->inflight_wait);
                spin_unlock_irqrestore(&ctx->inflight_lock, flags);
        }

        percpu_ref_put(&req->ctx->refs);
        __io_req_do_free(req);
}

struct req_batch {
        void *reqs[IO_IOPOLL_BATCH];
        int to_free;
        int need_iter;
};

static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
{
        int fixed_refs = rb->to_free;

        if (!rb->to_free)
                return;
        if (rb->need_iter) {
                int i, inflight = 0;
                unsigned long flags;

                fixed_refs = 0;
                for (i = 0; i < rb->to_free; i++) {
                        struct io_kiocb *req = rb->reqs[i];

                        if (req->flags & REQ_F_FIXED_FILE) {
                                req->file = NULL;
                                fixed_refs++;
                        }
                        if (req->flags & REQ_F_INFLIGHT)
                                inflight++;
                        __io_req_aux_free(req);
                }
                if (!inflight)
                        goto do_free;

                spin_lock_irqsave(&ctx->inflight_lock, flags);
                for (i = 0; i < rb->to_free; i++) {
                        struct io_kiocb *req = rb->reqs[i];

                        if (req->flags & REQ_F_INFLIGHT) {
                                list_del(&req->inflight_entry);
                                if (!--inflight)
                                        break;
                        }
                }
                spin_unlock_irqrestore(&ctx->inflight_lock, flags);

                if (waitqueue_active(&ctx->inflight_wait))
                        wake_up(&ctx->inflight_wait);
        }
do_free:
        kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
        if (fixed_refs)
                percpu_ref_put_many(&ctx->file_data->refs, fixed_refs);
        percpu_ref_put_many(&ctx->refs, rb->to_free);
        rb->to_free = rb->need_iter = 0;
}

static bool io_link_cancel_timeout(struct io_kiocb *req)
{
        struct io_ring_ctx *ctx = req->ctx;
        int ret;

        ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
        if (ret != -1) {
                io_cqring_fill_event(req, -ECANCELED);
                io_commit_cqring(ctx);
                req->flags &= ~REQ_F_LINK;
                io_put_req(req);
                return true;
        }

        return false;
}

static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
        struct io_ring_ctx *ctx = req->ctx;
        bool wake_ev = false;

        /* Already got next link */
        if (req->flags & REQ_F_LINK_NEXT)
                return;

        /*
         * The list should never be empty when we are called here. But it
         * could potentially happen if the chain is messed up, so check to
         * be on the safe side.
         */
        while (!list_empty(&req->link_list)) {
                struct io_kiocb *nxt = list_first_entry(&req->link_list,
                                                struct io_kiocb, link_list);

                if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) &&
                             (nxt->flags & REQ_F_TIMEOUT))) {
                        list_del_init(&nxt->link_list);
                        wake_ev |= io_link_cancel_timeout(nxt);
                        req->flags &= ~REQ_F_LINK_TIMEOUT;
                        continue;
                }

                list_del_init(&req->link_list);
                if (!list_empty(&nxt->link_list))
                        nxt->flags |= REQ_F_LINK;
                *nxtptr = nxt;
                break;
        }

        req->flags |= REQ_F_LINK_NEXT;
        if (wake_ev)
                io_cqring_ev_posted(ctx);
}

/*
 * Called if REQ_F_LINK is set, and we fail the head request
 */
static void io_fail_links(struct io_kiocb *req)
{
        struct io_ring_ctx *ctx = req->ctx;
        unsigned long flags;

        spin_lock_irqsave(&ctx->completion_lock, flags);

        while (!list_empty(&req->link_list)) {
                struct io_kiocb *link = list_first_entry(&req->link_list,
                                                struct io_kiocb, link_list);

                list_del_init(&link->link_list);
                trace_io_uring_fail_link(req, link);

                if ((req->flags & REQ_F_LINK_TIMEOUT) &&
                    link->opcode == IORING_OP_LINK_TIMEOUT) {
                        io_link_cancel_timeout(link);
                } else {
                        io_cqring_fill_event(link, -ECANCELED);
                        __io_double_put_req(link);
                }
                req->flags &= ~REQ_F_LINK_TIMEOUT;
        }

        io_commit_cqring(ctx);
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
        io_cqring_ev_posted(ctx);
}

static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
{
        if (likely(!(req->flags & REQ_F_LINK)))
                return;

        /*
         * If LINK is set, we have dependent requests in this chain. If we
         * didn't fail this request, queue the first one up, moving any other
         * dependencies to the next request. In case of failure, fail the rest
         * of the chain.
         */
        if (req->flags & REQ_F_FAIL_LINK) {
                io_fail_links(req);
        } else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
                        REQ_F_LINK_TIMEOUT) {
                struct io_ring_ctx *ctx = req->ctx;
                unsigned long flags;

                /*
                 * If this is a timeout link, we could be racing with the
                 * timeout timer. Grab the completion lock for this case to
                 * protect against that.
                 */
                spin_lock_irqsave(&ctx->completion_lock, flags);
                io_req_link_next(req, nxt);
                spin_unlock_irqrestore(&ctx->completion_lock, flags);
        } else {
                io_req_link_next(req, nxt);
        }
}

static void io_free_req(struct io_kiocb *req)
{
        struct io_kiocb *nxt = NULL;

        io_req_find_next(req, &nxt);
        __io_free_req(req);

        if (nxt)
                io_queue_async_work(nxt);
}

/*
 * Drop reference to request, return next in chain (if there is one) if this
 * was the last reference to this request.
 */
__attribute__((nonnull))
static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
        io_req_find_next(req, nxtptr);

        if (refcount_dec_and_test(&req->refs))
                __io_free_req(req);
}

static void io_put_req(struct io_kiocb *req)
{
        if (refcount_dec_and_test(&req->refs))
                io_free_req(req);
}

/*
 * Must only be used if we don't need to care about links, usually from
 * within the completion handling itself.
 */
static void __io_double_put_req(struct io_kiocb *req)
{
        /* drop both submit and complete references */
        if (refcount_sub_and_test(2, &req->refs))
                __io_free_req(req);
}

static void io_double_put_req(struct io_kiocb *req)
{
        /* drop both submit and complete references */
        if (refcount_sub_and_test(2, &req->refs))
                io_free_req(req);
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
{
        struct io_rings *rings = ctx->rings;

        if (test_bit(0, &ctx->cq_check_overflow)) {
                /*
                 * noflush == true is from the waitqueue handler, just ensure
                 * we wake up the task, and the next invocation will flush the
                 * entries. We cannot safely do it from here.
                 */
                if (noflush && !list_empty(&ctx->cq_overflow_list))
                        return -1U;

                io_cqring_overflow_flush(ctx, false);
        }

        /* See comment at the top of this file */
        smp_rmb();
        return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
        struct io_rings *rings = ctx->rings;

        /* make sure SQ entry isn't read before tail */
        return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
{
        if ((req->flags & REQ_F_LINK) || io_is_fallback_req(req))
                return false;

        if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
                rb->need_iter++;

        rb->reqs[rb->to_free++] = req;
        if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
                io_free_req_many(req->ctx, rb);
        return true;
}

/*
 * Find and free completed poll iocbs
 */
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
                               struct list_head *done)
{
        struct req_batch rb;
        struct io_kiocb *req;

        rb.to_free = rb.need_iter = 0;
        while (!list_empty(done)) {
                req = list_first_entry(done, struct io_kiocb, list);
                list_del(&req->list);

                io_cqring_fill_event(req, req->result);
                (*nr_events)++;

                if (refcount_dec_and_test(&req->refs) &&
                    !io_req_multi_free(&rb, req))
                        io_free_req(req);
        }

        io_commit_cqring(ctx);
        io_free_req_many(ctx, &rb);
}

static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
                        long min)
{
        struct io_kiocb *req, *tmp;
        LIST_HEAD(done);
        bool spin;
        int ret;

        /*
         * Only spin for completions if we don't have multiple devices hanging
         * off our complete list, and we're under the requested amount.
         */
        spin = !ctx->poll_multi_file && *nr_events < min;

        ret = 0;
        list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
                struct kiocb *kiocb = &req->rw.kiocb;

                /*
                 * Move completed entries to our local list. If we find a
                 * request that requires polling, break out and complete
                 * the done list first, if we have entries there.
                 */
                if (req->flags & REQ_F_IOPOLL_COMPLETED) {
                        list_move_tail(&req->list, &done);
                        continue;
                }
                if (!list_empty(&done))
                        break;

                ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
                if (ret < 0)
                        break;

                if (ret && spin)
                        spin = false;
                ret = 0;
        }

        if (!list_empty(&done))
                io_iopoll_complete(ctx, nr_events, &done);

        return ret;
}

/*
 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
 * non-spinning poll check - we'll still enter the driver poll loop, but only
 * as a non-spinning completion check.
 */
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
                                long min)
{
        while (!list_empty(&ctx->poll_list) && !need_resched()) {
                int ret;

                ret = io_do_iopoll(ctx, nr_events, min);
                if (ret < 0)
                        return ret;
                if (!min || *nr_events >= min)
                        return 0;
        }

        return 1;
}

/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
{
        if (!(ctx->flags & IORING_SETUP_IOPOLL))
                return;

        mutex_lock(&ctx->uring_lock);
        while (!list_empty(&ctx->poll_list)) {
                unsigned int nr_events = 0;

                io_iopoll_getevents(ctx, &nr_events, 1);

                /*
                 * Ensure we allow local-to-the-cpu processing to take place,
                 * in this case we need to ensure that we reap all events.
                 */
                cond_resched();
        }
        mutex_unlock(&ctx->uring_lock);
}

static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
                             long min)
{
        int iters = 0, ret = 0;

        do {
                int tmin = 0;

                /*
                 * Don't enter poll loop if we already have events pending.
                 * If we do, we can potentially be spinning for commands that
                 * already triggered a CQE (eg in error).
                 */
                if (io_cqring_events(ctx, false))
                        break;

                /*
                 * If a submit got punted to a workqueue, we can have the
                 * application entering polling for a command before it gets
                 * issued. That app will hold the uring_lock for the duration
                 * of the poll right here, so we need to take a breather every
                 * now and then to ensure that the issue has a chance to add
                 * the poll to the issued list. Otherwise we can spin here
                 * forever, while the workqueue is stuck trying to acquire the
                 * very same mutex.
                 */
                if (!(++iters & 7)) {
                        mutex_unlock(&ctx->uring_lock);
                        mutex_lock(&ctx->uring_lock);
                }

                if (*nr_events < min)
                        tmin = min - *nr_events;

                ret = io_iopoll_getevents(ctx, nr_events, tmin);
                if (ret <= 0)
                        break;
                ret = 0;
        } while (min && !*nr_events && !need_resched());

        return ret;
}

static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
                           long min)
{
        int ret;

        /*
         * We disallow the app entering submit/complete with polling, but we
         * still need to lock the ring to prevent racing with polled issue
         * that got punted to a workqueue.
         */
        mutex_lock(&ctx->uring_lock);
        ret = __io_iopoll_check(ctx, nr_events, min);
        mutex_unlock(&ctx->uring_lock);
        return ret;
}

static void kiocb_end_write(struct io_kiocb *req)
{
        /*
         * Tell lockdep we inherited freeze protection from submission
         * thread.
         */
        if (req->flags & REQ_F_ISREG) {
                struct inode *inode = file_inode(req->file);

                __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
        }
        file_end_write(req->file);
}

static inline void req_set_fail_links(struct io_kiocb *req)
{
        if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
                req->flags |= REQ_F_FAIL_LINK;
}

static void io_complete_rw_common(struct kiocb *kiocb, long res)
{
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);

        if (res != req->result)
                req_set_fail_links(req);
        io_cqring_add_event(req, res);
}

static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

        io_complete_rw_common(kiocb, res);
        io_put_req(req);
}

static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
{
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
        struct io_kiocb *nxt = NULL;

        io_complete_rw_common(kiocb, res);
        io_put_req_find_next(req, &nxt);

        return nxt;
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);

        if (res != req->result)
                req_set_fail_links(req);
        req->result = res;
        if (res != -EAGAIN)
                req->flags |= REQ_F_IOPOLL_COMPLETED;
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from an io_iopoll_getevents() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req)
{
        struct io_ring_ctx *ctx = req->ctx;

        /*
         * Track whether we have multiple files in our lists. This will impact
         * how we do polling eventually, not spinning if we're on potentially
         * different devices.
         */
        if (list_empty(&ctx->poll_list)) {
                ctx->poll_multi_file = false;
        } else if (!ctx->poll_multi_file) {
                struct io_kiocb *list_req;

                list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
                                                list);
                if (list_req->file != req->file)
                        ctx->poll_multi_file = true;
        }

        /*
         * For fast devices, IO may have already completed. If it has, add
         * it to the front so we find it first.
         */
        if (req->flags & REQ_F_IOPOLL_COMPLETED)
                list_add(&req->list, &ctx->poll_list);
        else
                list_add_tail(&req->list, &ctx->poll_list);
}

static void io_file_put(struct io_submit_state *state)
{
        if (state->file) {
                int diff = state->has_refs - state->used_refs;

                if (diff)
                        fput_many(state->file, diff);
                state->file = NULL;
        }
}

/*
 * Get as many references to a file as we have IOs left in this submission,
 * assuming most submissions are for one file, or at least that each file
 * has more than one submission.
 */
static struct file *io_file_get(struct io_submit_state *state, int fd)
{
        if (!state)
                return fget(fd);

        if (state->file) {
                if (state->fd == fd) {
                        state->used_refs++;
                        state->ios_left--;
                        return state->file;
                }
                io_file_put(state);
        }
        state->file = fget_many(fd, state->ios_left);
        if (!state->file)
                return NULL;

        state->fd = fd;
        state->has_refs = state->ios_left;
        state->used_refs = 1;
        state->ios_left--;
        return state->file;
}
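
/*
 * Effect of the cache above, by example: a submission batch with
 * ios_left == 8 that repeatedly targets one fd takes a single
 * fget_many(fd, 8) up front and then hands out references locally;
 * io_file_put() later returns the unused portion (has_refs - used_refs)
 * with one fput_many() call.
 */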

/*
 * If we tracked the file through the SCM inflight mechanism, we could support
 * any file. For now, just ensure that anything potentially problematic is done
 * inline.
 */
static bool io_file_supports_async(struct file *file)
{
	umode_t mode = file_inode(file)->i_mode;

	if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode))
		return true;
	if (S_ISREG(mode) && file->f_op != &io_uring_fops)
		return true;

	return false;
}

static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      bool force_nonblock)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct kiocb *kiocb = &req->rw.kiocb;
	unsigned ioprio;
	int ret;

	if (S_ISREG(file_inode(req->file)->i_mode))
		req->flags |= REQ_F_ISREG;

	kiocb->ki_pos = READ_ONCE(sqe->off);
	if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		kiocb->ki_pos = req->file->f_pos;
	}
	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
	if (unlikely(ret))
		return ret;

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		kiocb->ki_ioprio = ioprio;
	} else
		kiocb->ki_ioprio = get_current_ioprio();

	/* don't allow async punt if RWF_NOWAIT was requested */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    (req->file->f_flags & O_NONBLOCK))
		req->flags |= REQ_F_NOWAIT;

	if (force_nonblock)
		kiocb->ki_flags |= IOCB_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
		    !kiocb->ki_filp->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->result = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	req->rw.addr = READ_ONCE(sqe->addr);
	req->rw.len = READ_ONCE(sqe->len);
	/* we own ->private, reuse it for the buffer index */
	req->rw.kiocb.private = (void *) (unsigned long)
					READ_ONCE(sqe->buf_index);
	return 0;
}
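
/*
 * Userspace view of the above (a liburing-style sketch; only the sqe
 * fields io_prep_rw() and friends consume): for a fixed-buffer read,
 * the registered buffer index travels in sqe->buf_index and is stashed
 * in kiocb->private for io_import_fixed() to pick up later:
 *
 *	sqe->opcode	= IORING_OP_READ_FIXED;
 *	sqe->fd		= fd;
 *	sqe->off	= file_offset;
 *	sqe->addr	= (unsigned long) buf;	// inside a registered buffer
 *	sqe->len	= buf_len;
 *	sqe->buf_index	= 0;			// index into registered buffers
 */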

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		/* fall through */
	default:
		kiocb->ki_complete(kiocb, ret, 0);
	}
}

static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
		       bool in_async)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	if (req->flags & REQ_F_CUR_POS)
		req->file->f_pos = kiocb->ki_pos;
	if (in_async && ret >= 0 && kiocb->ki_complete == io_complete_rw)
		*nxt = __io_complete_rw(kiocb, ret);
	else
		io_rw_done(kiocb, ret);
}

static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
			       struct iov_iter *iter)
{
	struct io_ring_ctx *ctx = req->ctx;
	size_t len = req->rw.len;
	struct io_mapped_ubuf *imu;
	unsigned index, buf_index;
	size_t offset;
	u64 buf_addr;

	/* attempt to use fixed buffers without having provided iovecs */
	if (unlikely(!ctx->user_bufs))
		return -EFAULT;

	buf_index = (unsigned long) req->rw.kiocb.private;
	if (unlikely(buf_index >= ctx->nr_user_bufs))
		return -EFAULT;

	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
	imu = &ctx->user_bufs[index];
	buf_addr = req->rw.addr;

	/* overflow */
	if (buf_addr + len < buf_addr)
		return -EFAULT;
	/* not inside the mapped region */
	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
		return -EFAULT;

	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return len;
}
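
/*
 * Worked example for the fast-forward above (assuming 4K pages): with a
 * partial first bvec of 1024 bytes and offset == 10000, the first
 * segment is skipped (offset becomes 8976), seg_skip = 1 + (8976 >> 12)
 * = 3 whole bvecs, iov_offset = 8976 & ~PAGE_MASK = 784 into the fourth
 * bvec, and count drops by 1024 + 8976 -- the full original offset.
 */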

static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
			       struct iovec **iovec, struct iov_iter *iter)
{
	void __user *buf = u64_to_user_ptr(req->rw.addr);
	size_t sqe_len = req->rw.len;
	u8 opcode;

	opcode = req->opcode;
	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		*iovec = NULL;
		return io_import_fixed(req, rw, iter);
	}

	/* buffer index only valid with fixed read/write */
	if (req->rw.kiocb.private)
		return -EINVAL;

	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
		ssize_t ret;
		ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
		*iovec = NULL;
		return ret;
	}

	if (req->io) {
		struct io_async_rw *iorw = &req->io->rw;

		*iovec = iorw->iov;
		iov_iter_init(iter, rw, *iovec, iorw->nr_segs, iorw->size);
		if (iorw->iov == iorw->fast_iov)
			*iovec = NULL;
		return iorw->size;
	}

	if (!req->has_user)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
						iovec, iter);
#endif

	return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
			    struct iov_iter *iter)
{
	ssize_t ret = 0;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if (kiocb->ki_flags & IOCB_NOWAIT)
		return -EAGAIN;

	while (iov_iter_count(iter)) {
		struct iovec iovec;
		ssize_t nr;

		if (!iov_iter_is_bvec(iter)) {
			iovec = iov_iter_iovec(iter);
		} else {
			/* fixed buffers import bvec */
			iovec.iov_base = kmap(iter->bvec->bv_page)
						+ iter->iov_offset;
			iovec.iov_len = min(iter->count,
					iter->bvec->bv_len - iter->iov_offset);
		}

		if (rw == READ) {
			nr = file->f_op->read(file, iovec.iov_base,
					      iovec.iov_len, &kiocb->ki_pos);
		} else {
			nr = file->f_op->write(file, iovec.iov_base,
					       iovec.iov_len, &kiocb->ki_pos);
		}

		if (iov_iter_is_bvec(iter))
			kunmap(iter->bvec->bv_page);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != iovec.iov_len)
			break;
		iov_iter_advance(iter, nr);
	}

	return ret;
}
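
/*
 * Note on the loop above: fixed buffers arrive as a bvec iterator, so
 * each page is temporarily mapped with kmap()/kunmap() to produce an
 * address that ->read()/->write() can consume, one segment per pass;
 * a short read or write terminates the loop early.
 */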

static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
			  struct iovec *iovec, struct iovec *fast_iov,
			  struct iov_iter *iter)
{
	req->io->rw.nr_segs = iter->nr_segs;
	req->io->rw.size = io_size;
	req->io->rw.iov = iovec;
	if (!req->io->rw.iov) {
		req->io->rw.iov = req->io->rw.fast_iov;
		memcpy(req->io->rw.iov, fast_iov,
			sizeof(struct iovec) * iter->nr_segs);
	}
}

static int io_alloc_async_ctx(struct io_kiocb *req)
{
	if (!io_op_defs[req->opcode].async_ctx)
		return 0;
	req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
	return req->io == NULL;
}

static void io_rw_async(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct iovec *iov = NULL;

	if (req->io->rw.iov != req->io->rw.fast_iov)
		iov = req->io->rw.iov;
	io_wq_submit_work(workptr);
	kfree(iov);
}

static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
			     struct iovec *iovec, struct iovec *fast_iov,
			     struct iov_iter *iter)
{
	if (!io_op_defs[req->opcode].async_ctx)
		return 0;
	if (!req->io) {
		if (io_alloc_async_ctx(req))
			return -ENOMEM;

		io_req_map_rw(req, io_size, iovec, fast_iov, iter);
	}
	req->work.func = io_rw_async;
	return 0;
}
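
/*
 * Flow sketch: io_read()/io_write() call io_setup_async_rw() when a
 * non-blocking attempt returns -EAGAIN. The iovec -- which may live on
 * the submitter's stack as inline_vecs[] -- is copied into req->io, so
 * the request can be retried from an io-wq worker long after the
 * original submission stack frame is gone.
 */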

static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			bool force_nonblock)
{
	struct io_async_ctx *io;
	struct iov_iter iter;
	ssize_t ret;

	ret = io_prep_rw(req, sqe, force_nonblock);
	if (ret)
		return ret;

	if (unlikely(!(req->file->f_mode & FMODE_READ)))
		return -EBADF;

	if (!req->io)
		return 0;

	io = req->io;
	io->rw.iov = io->rw.fast_iov;
	req->io = NULL;
	ret = io_import_iovec(READ, req, &io->rw.iov, &iter);
	req->io = io;
	if (ret < 0)
		return ret;

	io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
	return 0;
}

static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
		   bool force_nonblock)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter iter;
	size_t iov_count;
	ssize_t io_size, ret;

	ret = io_import_iovec(READ, req, &iovec, &iter);
	if (ret < 0)
		return ret;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;

	req->result = 0;
	io_size = ret;
	if (req->flags & REQ_F_LINK)
		req->result = io_size;

	/*
	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
	 * we know to async punt it even if it was opened O_NONBLOCK
	 */
	if (force_nonblock && !io_file_supports_async(req->file)) {
		req->flags |= REQ_F_MUST_PUNT;
		goto copy_iov;
	}

	iov_count = iov_iter_count(&iter);
	ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
	if (!ret) {
		ssize_t ret2;

		if (req->file->f_op->read_iter)
			ret2 = call_read_iter(req->file, kiocb, &iter);
		else
			ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);

		/* Catch -EAGAIN return for forced non-blocking submission */
		if (!force_nonblock || ret2 != -EAGAIN) {
			kiocb_done(kiocb, ret2, nxt, req->in_async);
		} else {
copy_iov:
			ret = io_setup_async_rw(req, io_size, iovec,
						inline_vecs, &iter);
			if (ret)
				goto out_free;
			return -EAGAIN;
		}
	}
out_free:
	if (!io_wq_current_is_worker())
		kfree(iovec);
	return ret;
}

static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			 bool force_nonblock)
{
	struct io_async_ctx *io;
	struct iov_iter iter;
	ssize_t ret;

	ret = io_prep_rw(req, sqe, force_nonblock);
	if (ret)
		return ret;

	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
		return -EBADF;

	if (!req->io)
		return 0;

	io = req->io;
	io->rw.iov = io->rw.fast_iov;
	req->io = NULL;
	ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter);
	req->io = io;
	if (ret < 0)
		return ret;

	io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
	return 0;
}

static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
		    bool force_nonblock)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter iter;
	size_t iov_count;
	ssize_t ret, io_size;

	ret = io_import_iovec(WRITE, req, &iovec, &iter);
	if (ret < 0)
		return ret;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;

	req->result = 0;
	io_size = ret;
	if (req->flags & REQ_F_LINK)
		req->result = io_size;

	/*
	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
	 * we know to async punt it even if it was opened O_NONBLOCK
	 */
	if (force_nonblock && !io_file_supports_async(req->file)) {
		req->flags |= REQ_F_MUST_PUNT;
		goto copy_iov;
	}

	/* file path doesn't support NOWAIT for non-direct IO */
	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
	    (req->flags & REQ_F_ISREG))
		goto copy_iov;

	iov_count = iov_iter_count(&iter);
	ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
	if (!ret) {
		ssize_t ret2;

		/*
		 * Open-code file_start_write here to grab freeze protection,
		 * which will be released by another thread in
		 * io_complete_rw(). Fool lockdep by telling it the lock got
		 * released so that it doesn't complain about the held lock when
		 * we return to userspace.
		 */
		if (req->flags & REQ_F_ISREG) {
			__sb_start_write(file_inode(req->file)->i_sb,
						SB_FREEZE_WRITE, true);
			__sb_writers_release(file_inode(req->file)->i_sb,
						SB_FREEZE_WRITE);
		}
		kiocb->ki_flags |= IOCB_WRITE;

		if (req->file->f_op->write_iter)
			ret2 = call_write_iter(req->file, kiocb, &iter);
		else
			ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
		if (!force_nonblock || ret2 != -EAGAIN) {
			kiocb_done(kiocb, ret2, nxt, req->in_async);
		} else {
copy_iov:
			ret = io_setup_async_rw(req, io_size, iovec,
						inline_vecs, &iter);
			if (ret)
				goto out_free;
			return -EAGAIN;
		}
	}
out_free:
	if (!io_wq_current_is_worker())
		kfree(iovec);
	return ret;
}

/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	io_cqring_add_event(req, 0);
	io_put_req(req);
	return 0;
}

static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	return 0;
}

static bool io_req_cancelled(struct io_kiocb *req)
{
	if (req->work.flags & IO_WQ_WORK_CANCEL) {
		req_set_fail_links(req);
		io_cqring_add_event(req, -ECANCELED);
		io_put_req(req);
		return true;
	}

	return false;
}

static void io_link_work_cb(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;
	struct io_kiocb *link = work->data;

	io_queue_linked_timeout(link);
	work->func = io_wq_submit_work;
}

static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
{
	struct io_kiocb *link;

	io_prep_async_work(nxt, &link);
	*workptr = &nxt->work;
	if (link) {
		nxt->work.flags |= IO_WQ_WORK_CB;
		nxt->work.func = io_link_work_cb;
		nxt->work.data = link;
	}
}
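
/*
 * Design note: rather than requeueing the next request in a completed
 * chain, io_wq_assign_next() hands it straight back to the current
 * io-wq worker via *workptr, keeping the chain on one thread; if the
 * next request carries a linked timeout, io_link_work_cb() arms it
 * before the actual work runs.
 */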

static void io_fsync_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	loff_t end = req->sync.off + req->sync.len;
	struct io_kiocb *nxt = NULL;
	int ret;

	if (io_req_cancelled(req))
		return;

	ret = vfs_fsync_range(req->file, req->sync.off,
				end > 0 ? end : LLONG_MAX,
				req->sync.flags & IORING_FSYNC_DATASYNC);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, &nxt);
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}

static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
		    bool force_nonblock)
{
	struct io_wq_work *work, *old_work;

	/* fsync always requires a blocking context */
	if (force_nonblock) {
		io_put_req(req);
		req->work.func = io_fsync_finish;
		return -EAGAIN;
	}

	work = old_work = &req->work;
	io_fsync_finish(&work);
	if (work && work != old_work)
		*nxt = container_of(work, struct io_kiocb, work);
	return 0;
}

static void io_fallocate_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;
	int ret;

	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
				req->sync.len);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, &nxt);
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}

static int io_fallocate_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->addr);
	req->sync.mode = READ_ONCE(sqe->len);
	return 0;
}
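
/*
 * Note the sqe field reuse above: for fallocate, sqe->off carries the
 * file offset, sqe->addr the length, and sqe->len the fallocate mode --
 * there is no dedicated sqe member for each, so existing fields are
 * repurposed.
 */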

static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
			bool force_nonblock)
{
	struct io_wq_work *work, *old_work;

	/* fallocate always requires a blocking context */
	if (force_nonblock) {
		io_put_req(req);
		req->work.func = io_fallocate_finish;
		return -EAGAIN;
	}

	work = old_work = &req->work;
	io_fallocate_finish(&work);
	if (work && work != old_work)
		*nxt = container_of(work, struct io_kiocb, work);

	return 0;
}

static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	int ret;

	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;

	req->open.dfd = READ_ONCE(sqe->fd);
	req->open.how.mode = READ_ONCE(sqe->len);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.how.flags = READ_ONCE(sqe->open_flags);

	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}

	return 0;
}

static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct open_how __user *how;
	const char __user *fname;
	size_t len;
	int ret;

	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;

	req->open.dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);

	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
					len);
	if (ret)
		return ret;

	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
		req->open.how.flags |= O_LARGEFILE;

	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}

	return 0;
}
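
/*
 * Userspace view of the above (a sketch; only the sqe fields that
 * io_openat2_prep() reads): sqe->addr points at the pathname,
 * sqe->addr2 at a struct open_how, and sqe->len carries its size so
 * copy_struct_from_user() can accept older or newer layouts:
 *
 *	struct open_how how = { .flags = O_RDWR | O_CLOEXEC };
 *
 *	sqe->opcode	= IORING_OP_OPENAT2;
 *	sqe->fd		= AT_FDCWD;
 *	sqe->addr	= (unsigned long) pathname;
 *	sqe->addr2	= (unsigned long) &how;
 *	sqe->len	= sizeof(how);
 */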

static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
		      bool force_nonblock)
{
	struct open_flags op;
	struct file *file;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	ret = build_open_flags(&req->open.how, &op);
	if (ret)
		goto err;

	ret = get_unused_fd_flags(req->open.how.flags);
	if (ret < 0)
		goto err;

	file = do_filp_open(req->open.dfd, req->open.filename, &op);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
	} else {
		fsnotify_open(file);
		fd_install(ret, file);
	}
err:
	putname(req->open.filename);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
}

static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt,
		     bool force_nonblock)
{
	req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
	return io_openat2(req, nxt, force_nonblock);
}

static int io_epoll_ctl_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_EPOLL)
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;

	req->epoll.epfd = READ_ONCE(sqe->fd);
	req->epoll.op = READ_ONCE(sqe->len);
	req->epoll.fd = READ_ONCE(sqe->off);

	if (ep_op_has_event(req->epoll.op)) {
		struct epoll_event __user *ev;

		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
			return -EFAULT;
	}

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
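
/*
 * Field mapping above, for reference: the epoll instance arrives in
 * sqe->fd, the EPOLL_CTL_* op in sqe->len, the target file descriptor
 * in sqe->off, and -- when the op takes one -- the struct epoll_event
 * via the user pointer in sqe->addr.
 */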

static int io_epoll_ctl(struct io_kiocb *req, struct io_kiocb **nxt,
			bool force_nonblock)
{
#if defined(CONFIG_EPOLL)
	struct io_epoll *ie = &req->epoll;
	int ret;

	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;

	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	if (sqe->ioprio || sqe->buf_index || sqe->off)
		return -EINVAL;

	req->madvise.addr = READ_ONCE(sqe->addr);
	req->madvise.len = READ_ONCE(sqe->len);
	req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise(struct io_kiocb *req, struct io_kiocb **nxt,
		      bool force_nonblock)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	struct io_madvise *ma = &req->madvise;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	ret = do_madvise(ma->addr, ma->len, ma->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->addr)
		return -EINVAL;

	req->fadvise.offset = READ_ONCE(sqe->off);
	req->fadvise.len = READ_ONCE(sqe->len);
	req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
}

static int io_fadvise(struct io_kiocb *req, struct io_kiocb **nxt,
		      bool force_nonblock)
{
	struct io_fadvise *fa = &req->fadvise;
	int ret;

	if (force_nonblock) {
		switch (fa->advice) {
		case POSIX_FADV_NORMAL:
		case POSIX_FADV_RANDOM:
		case POSIX_FADV_SEQUENTIAL:
			break;
		default:
			return -EAGAIN;
		}
	}

	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
}
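
/*
 * Rationale for the switch above (as I read it): NORMAL, RANDOM and
 * SEQUENTIAL only tweak readahead state and should not block, so they
 * may run inline from the non-blocking submission path; the remaining
 * advice values (e.g. WILLNEED/DONTNEED) can touch the page cache and
 * do IO, so they are punted to an async context.
 */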

static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	unsigned lookup_flags;
	int ret;

	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;

	req->open.dfd = READ_ONCE(sqe->fd);
	req->open.mask = READ_ONCE(sqe->len);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	req->open.how.flags = READ_ONCE(sqe->statx_flags);

	if (vfs_stat_set_lookup_flags(&lookup_flags, req->open.how.flags))
		return -EINVAL;

	req->open.filename = getname_flags(fname, lookup_flags, NULL);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}

	return 0;
}

static int io_statx(struct io_kiocb *req, struct io_kiocb **nxt,
		    bool force_nonblock)
{
	struct io_open *ctx = &req->open;
	unsigned lookup_flags;
	struct path path;
	struct kstat stat;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
		return -EINVAL;

retry:
	/* filename_lookup() drops it, keep a reference */
	ctx->filename->refcnt++;

	ret = filename_lookup(ctx->dfd, ctx->filename, lookup_flags, &path,
				NULL);
	if (ret)
		goto err;

	ret = vfs_getattr(&path, &stat, ctx->mask, ctx->how.flags);
	path_put(&path);
	if (retry_estale(ret, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	if (!ret)
		ret = cp_statx(&stat, ctx->buffer);
err:
	putname(ctx->filename);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
}

static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	/*
	 * If we queue this for async, it must not be cancellable. That would
	 * leave the 'file' in an indeterminate state.
	 */
	req->work.flags |= IO_WQ_WORK_NO_CANCEL;

	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
	    sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (sqe->flags & IOSQE_FIXED_FILE)
		return -EINVAL;

	req->close.fd = READ_ONCE(sqe->fd);
	if (req->file->f_op == &io_uring_fops ||
	    req->close.fd == req->ctx->ring_fd)
		return -EBADF;

	return 0;
}

static void io_close_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;

	/* Invoked with files, we need to do the close */
	if (req->work.files) {
		int ret;

		ret = filp_close(req->close.put_file, req->work.files);
		if (ret < 0)
			req_set_fail_links(req);
		io_cqring_add_event(req, ret);
	}

	fput(req->close.put_file);

	io_put_req_find_next(req, &nxt);
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}

static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
		    bool force_nonblock)
{
	int ret;

	req->close.put_file = NULL;
	ret = __close_fd_get_file(req->close.fd, &req->close.put_file);
	if (ret < 0)
		return ret;

	/* if the file has a flush method, be safe and punt to async */
	if (req->close.put_file->f_op->flush && !io_wq_current_is_worker())
		goto eagain;

	/*
	 * No ->flush(), safely close from here and just punt the
	 * fput() to async context.
	 */
	ret = filp_close(req->close.put_file, current->files);

	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);

	if (io_wq_current_is_worker()) {
		struct io_wq_work *old_work, *work;

		old_work = work = &req->work;
		io_close_finish(&work);
		if (work && work != old_work)
			*nxt = container_of(work, struct io_kiocb, work);
		return 0;
	}

eagain:
	req->work.func = io_close_finish;
	/*
	 * Do manual async queue here to avoid grabbing files - we don't
	 * need the files, and it'll cause io_close_finish() to close
	 * the file again and cause a double CQE entry for this request
	 */
	io_queue_async_work(req);
	return 0;
}

static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	req->sync.flags = READ_ONCE(sqe->sync_range_flags);
	return 0;
}

static void io_sync_file_range_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;
	int ret;

	if (io_req_cancelled(req))
		return;

	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
				req->sync.flags);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, &nxt);
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}

static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
			      bool force_nonblock)
{
	struct io_wq_work *work, *old_work;

	/* sync_file_range always requires a blocking context */
	if (force_nonblock) {
		io_put_req(req);
		req->work.func = io_sync_file_range_finish;
		return -EAGAIN;
	}

	work = old_work = &req->work;
	io_sync_file_range_finish(&work);
	if (work && work != old_work)
		*nxt = container_of(work, struct io_kiocb, work);
	return 0;
}

#if defined(CONFIG_NET)
static void io_sendrecv_async(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct iovec *iov = NULL;

	if (req->io->rw.iov != req->io->rw.fast_iov)
		iov = req->io->msg.iov;
	io_wq_submit_work(workptr);
	kfree(iov);
}
#endif

static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	struct io_sr_msg *sr = &req->sr_msg;
	struct io_async_ctx *io = req->io;

	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);

	if (!io || req->opcode == IORING_OP_SEND)
		return 0;

	io->msg.iov = io->msg.fast_iov;
	return sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
					&io->msg.iov);
#else
	return -EOPNOTSUPP;
#endif
}

static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
		      bool force_nonblock)
{
#if defined(CONFIG_NET)
	struct io_async_msghdr *kmsg = NULL;
	struct socket *sock;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct io_async_ctx io;
		struct sockaddr_storage addr;
		unsigned flags;

		if (req->io) {
			kmsg = &req->io->msg;
			kmsg->msg.msg_name = &addr;
			/* if iov is set, it's allocated already */
			if (!kmsg->iov)
				kmsg->iov = kmsg->fast_iov;
			kmsg->msg.msg_iter.iov = kmsg->iov;
		} else {
			struct io_sr_msg *sr = &req->sr_msg;

			kmsg = &io.msg;
			kmsg->msg.msg_name = &addr;

			io.msg.iov = io.msg.fast_iov;
			ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
					sr->msg_flags, &io.msg.iov);
			if (ret)
				return ret;
		}

		flags = req->sr_msg.msg_flags;
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

		ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
		if (force_nonblock && ret == -EAGAIN) {
			if (req->io)
				return -EAGAIN;
			if (io_alloc_async_ctx(req))
				return -ENOMEM;
			memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
			req->work.func = io_sendrecv_async;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
		kfree(kmsg->iov);
	io_cqring_add_event(req, ret);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req_find_next(req, nxt);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
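
/*
 * As with reads and writes, the -EAGAIN path above stashes state for
 * the async retry: the msghdr (and any copied iovec) is memcpy'd into
 * req->io->msg, so the io-wq worker re-issues the sendmsg without
 * re-reading the user's struct msghdr a second time.
 */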
aa1fa28f 3065
fddaface
JA
3066static int io_send(struct io_kiocb *req, struct io_kiocb **nxt,
3067 bool force_nonblock)
3068{
3069#if defined(CONFIG_NET)
3070 struct socket *sock;
3071 int ret;
3072
3073 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3074 return -EINVAL;
3075
3076 sock = sock_from_file(req->file, &ret);
3077 if (sock) {
3078 struct io_sr_msg *sr = &req->sr_msg;
3079 struct msghdr msg;
3080 struct iovec iov;
3081 unsigned flags;
3082
3083 ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
3084 &msg.msg_iter);
3085 if (ret)
3086 return ret;
3087
3088 msg.msg_name = NULL;
3089 msg.msg_control = NULL;
3090 msg.msg_controllen = 0;
3091 msg.msg_namelen = 0;
3092
3093 flags = req->sr_msg.msg_flags;
3094 if (flags & MSG_DONTWAIT)
3095 req->flags |= REQ_F_NOWAIT;
3096 else if (force_nonblock)
3097 flags |= MSG_DONTWAIT;
3098
0b7b21e4
JA
3099 msg.msg_flags = flags;
3100 ret = sock_sendmsg(sock, &msg);
fddaface
JA
3101 if (force_nonblock && ret == -EAGAIN)
3102 return -EAGAIN;
3103 if (ret == -ERESTARTSYS)
3104 ret = -EINTR;
3105 }
3106
3107 io_cqring_add_event(req, ret);
3108 if (ret < 0)
3109 req_set_fail_links(req);
3110 io_put_req_find_next(req, nxt);
3111 return 0;
3112#else
3113 return -EOPNOTSUPP;
3114#endif
3115}
3116
3529d8c2
JA
3117static int io_recvmsg_prep(struct io_kiocb *req,
3118 const struct io_uring_sqe *sqe)
aa1fa28f
JA
3119{
3120#if defined(CONFIG_NET)
e47293fd 3121 struct io_sr_msg *sr = &req->sr_msg;
3529d8c2
JA
3122 struct io_async_ctx *io = req->io;
3123
3124 sr->msg_flags = READ_ONCE(sqe->msg_flags);
3125 sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
0b7b21e4 3126 sr->len = READ_ONCE(sqe->len);
06b76d44 3127
fddaface 3128 if (!io || req->opcode == IORING_OP_RECV)
06b76d44 3129 return 0;
03b1230c 3130
d9688565 3131 io->msg.iov = io->msg.fast_iov;
3529d8c2 3132 return recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
e47293fd 3133 &io->msg.uaddr, &io->msg.iov);
aa1fa28f 3134#else
e47293fd 3135 return -EOPNOTSUPP;
aa1fa28f
JA
3136#endif
3137}
3138
fc4df999
JA
3139static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
3140 bool force_nonblock)
aa1fa28f
JA
3141{
3142#if defined(CONFIG_NET)
0b416c3e 3143 struct io_async_msghdr *kmsg = NULL;
03b1230c
JA
3144 struct socket *sock;
3145 int ret;
3146
3147 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3148 return -EINVAL;
3149
3150 sock = sock_from_file(req->file, &ret);
3151 if (sock) {
b7bb4f7d 3152 struct io_async_ctx io;
03b1230c 3153 struct sockaddr_storage addr;
03b1230c
JA
3154 unsigned flags;
3155
03b1230c 3156 if (req->io) {
0b416c3e
JA
3157 kmsg = &req->io->msg;
3158 kmsg->msg.msg_name = &addr;
3159 /* if iov is set, it's allocated already */
3160 if (!kmsg->iov)
3161 kmsg->iov = kmsg->fast_iov;
3162 kmsg->msg.msg_iter.iov = kmsg->iov;
03b1230c 3163 } else {
3529d8c2
JA
3164 struct io_sr_msg *sr = &req->sr_msg;
3165
0b416c3e
JA
3166 kmsg = &io.msg;
3167 kmsg->msg.msg_name = &addr;
3529d8c2
JA
3168
3169 io.msg.iov = io.msg.fast_iov;
3170 ret = recvmsg_copy_msghdr(&io.msg.msg, sr->msg,
3171 sr->msg_flags, &io.msg.uaddr,
3172 &io.msg.iov);
03b1230c 3173 if (ret)
3529d8c2 3174 return ret;
03b1230c
JA
3175 }
3176
e47293fd
JA
3177 flags = req->sr_msg.msg_flags;
3178 if (flags & MSG_DONTWAIT)
3179 req->flags |= REQ_F_NOWAIT;
3180 else if (force_nonblock)
3181 flags |= MSG_DONTWAIT;
3182
3183 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
3184 kmsg->uaddr, flags);
03b1230c 3185 if (force_nonblock && ret == -EAGAIN) {
b7bb4f7d
JA
3186 if (req->io)
3187 return -EAGAIN;
3188 if (io_alloc_async_ctx(req))
3189 return -ENOMEM;
3190 memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
3191 req->work.func = io_sendrecv_async;
0b416c3e 3192 return -EAGAIN;
03b1230c
JA
3193 }
3194 if (ret == -ERESTARTSYS)
3195 ret = -EINTR;
3196 }
3197
b7bb4f7d 3198 if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
0b416c3e 3199 kfree(kmsg->iov);
03b1230c 3200 io_cqring_add_event(req, ret);
4e88d6e7
JA
3201 if (ret < 0)
3202 req_set_fail_links(req);
03b1230c
JA
3203 io_put_req_find_next(req, nxt);
3204 return 0;
0fa03c62
JA
3205#else
3206 return -EOPNOTSUPP;
3207#endif
3208}
5d17b4a4 3209
fddaface
JA
3210static int io_recv(struct io_kiocb *req, struct io_kiocb **nxt,
3211 bool force_nonblock)
3212{
3213#if defined(CONFIG_NET)
3214 struct socket *sock;
3215 int ret;
3216
3217 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3218 return -EINVAL;
3219
3220 sock = sock_from_file(req->file, &ret);
3221 if (sock) {
3222 struct io_sr_msg *sr = &req->sr_msg;
3223 struct msghdr msg;
3224 struct iovec iov;
3225 unsigned flags;
3226
3227 ret = import_single_range(READ, sr->buf, sr->len, &iov,
3228 &msg.msg_iter);
3229 if (ret)
3230 return ret;
3231
3232 msg.msg_name = NULL;
3233 msg.msg_control = NULL;
3234 msg.msg_controllen = 0;
3235 msg.msg_namelen = 0;
3236 msg.msg_iocb = NULL;
3237 msg.msg_flags = 0;
3238
3239 flags = req->sr_msg.msg_flags;
3240 if (flags & MSG_DONTWAIT)
3241 req->flags |= REQ_F_NOWAIT;
3242 else if (force_nonblock)
3243 flags |= MSG_DONTWAIT;
3244
0b7b21e4 3245 ret = sock_recvmsg(sock, &msg, flags);
fddaface
JA
3246 if (force_nonblock && ret == -EAGAIN)
3247 return -EAGAIN;
3248 if (ret == -ERESTARTSYS)
3249 ret = -EINTR;
3250 }
3251
3252 io_cqring_add_event(req, ret);
3253 if (ret < 0)
3254 req_set_fail_links(req);
3255 io_put_req_find_next(req, nxt);
3256 return 0;
3257#else
3258 return -EOPNOTSUPP;
3259#endif
3260}

static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	struct io_accept *accept = &req->accept;

	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

#if defined(CONFIG_NET)
static int __io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
		       bool force_nonblock)
{
	struct io_accept *accept = &req->accept;
	unsigned file_flags;
	int ret;

	file_flags = force_nonblock ? O_NONBLOCK : 0;
	ret = __sys_accept4_file(req->file, file_flags, accept->addr,
					accept->addr_len, accept->flags);
	if (ret == -EAGAIN && force_nonblock)
		return -EAGAIN;
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
}

static void io_accept_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;

	if (io_req_cancelled(req))
		return;
	__io_accept(req, &nxt, false);
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}
#endif

static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
		     bool force_nonblock)
{
#if defined(CONFIG_NET)
	int ret;

	ret = __io_accept(req, nxt, force_nonblock);
	if (ret == -EAGAIN && force_nonblock) {
		req->work.func = io_accept_finish;
		io_put_req(req);
		return -EAGAIN;
	}
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	struct io_connect *conn = &req->connect;
	struct io_async_ctx *io = req->io;

	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);

	if (!io)
		return 0;

	return move_addr_to_kernel(conn->addr, conn->addr_len,
					&io->connect.address);
#else
	return -EOPNOTSUPP;
#endif
}

static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt,
		      bool force_nonblock)
{
#if defined(CONFIG_NET)
	struct io_async_ctx __io, *io;
	unsigned file_flags;
	int ret;

	if (req->io) {
		io = req->io;
	} else {
		ret = move_addr_to_kernel(req->connect.addr,
						req->connect.addr_len,
						&__io.connect.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->connect.address,
					req->connect.addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req->io)
			return -EAGAIN;
		if (io_alloc_async_ctx(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static void io_poll_remove_one(struct io_kiocb *req)
{
	struct io_poll_iocb *poll = &req->poll;

	spin_lock(&poll->head->lock);
	WRITE_ONCE(poll->canceled, true);
	if (!list_empty(&poll->wait.entry)) {
		list_del_init(&poll->wait.entry);
		io_queue_async_work(req);
	}
	spin_unlock(&poll->head->lock);
	hash_del(&req->hash_node);
}

static void io_poll_remove_all(struct io_ring_ctx *ctx)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	int i;

	spin_lock_irq(&ctx->completion_lock);
	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
		struct hlist_head *list;

		list = &ctx->cancel_hash[i];
		hlist_for_each_entry_safe(req, tmp, list, hash_node)
			io_poll_remove_one(req);
	}
	spin_unlock_irq(&ctx->completion_lock);
}

static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
{
	struct hlist_head *list;
	struct io_kiocb *req;

	list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
	hlist_for_each_entry(req, list, hash_node) {
		if (sqe_addr == req->user_data) {
			io_poll_remove_one(req);
			return 0;
		}
	}

	return -ENOENT;
}

static int io_poll_remove_prep(struct io_kiocb *req,
			       const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
	    sqe->poll_events)
		return -EINVAL;

	req->poll.addr = READ_ONCE(sqe->addr);
	return 0;
}

/*
 * Find a running poll command that matches one specified in sqe->addr,
 * and remove it if found.
 */
static int io_poll_remove(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	u64 addr;
	int ret;

	addr = req->poll.addr;
	spin_lock_irq(&ctx->completion_lock);
	ret = io_poll_cancel(ctx, addr);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_add_event(req, ret);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
}

static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
{
	struct io_ring_ctx *ctx = req->ctx;

	req->poll.done = true;
	if (error)
		io_cqring_fill_event(req, error);
	else
		io_cqring_fill_event(req, mangle_poll(mask));
	io_commit_cqring(ctx);
}

static void io_poll_complete_work(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_poll_iocb *poll = &req->poll;
	struct poll_table_struct pt = { ._key = poll->events };
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *nxt = NULL;
	__poll_t mask = 0;
	int ret = 0;

	if (work->flags & IO_WQ_WORK_CANCEL) {
		WRITE_ONCE(poll->canceled, true);
		ret = -ECANCELED;
	} else if (READ_ONCE(poll->canceled)) {
		ret = -ECANCELED;
	}

	if (ret != -ECANCELED)
		mask = vfs_poll(poll->file, &pt) & poll->events;

	/*
	 * Note that ->ki_cancel callers also delete iocb from active_reqs after
	 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
	 * synchronize with them. In the cancellation case the list_del_init
	 * itself is not actually needed, but harmless so we keep it in to
	 * avoid further branches in the fast path.
	 */
	spin_lock_irq(&ctx->completion_lock);
	if (!mask && ret != -ECANCELED) {
		add_wait_queue(poll->head, &poll->wait);
		spin_unlock_irq(&ctx->completion_lock);
		return;
	}
	hash_del(&req->hash_node);
	io_poll_complete(req, mask, ret);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);

	if (ret < 0)
		req_set_fail_links(req);
	io_put_req_find_next(req, &nxt);
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}

static void __io_poll_flush(struct io_ring_ctx *ctx, struct llist_node *nodes)
{
	struct io_kiocb *req, *tmp;
	struct req_batch rb;

	rb.to_free = rb.need_iter = 0;
	spin_lock_irq(&ctx->completion_lock);
	llist_for_each_entry_safe(req, tmp, nodes, llist_node) {
		hash_del(&req->hash_node);
		io_poll_complete(req, req->result, 0);

		if (refcount_dec_and_test(&req->refs) &&
		    !io_req_multi_free(&rb, req)) {
			req->flags |= REQ_F_COMP_LOCKED;
			io_free_req(req);
		}
	}
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);
	io_free_req_many(ctx, &rb);
}

static void io_poll_flush(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct llist_node *nodes;

	nodes = llist_del_all(&req->ctx->poll_llist);
	if (nodes)
		__io_poll_flush(req->ctx, nodes);
}
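
/*
 * Batching note: io_poll_wake() below cannot always take the
 * completion_lock from the wakeup path, so completed poll requests are
 * pushed onto the lockless ctx->poll_llist instead; only the request
 * that finds the list empty schedules io_poll_flush(), which then
 * drains and completes the whole batch under a single spin_lock_irq().
 */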
3578
f0b493e6
JA
3579static void io_poll_trigger_evfd(struct io_wq_work **workptr)
3580{
3581 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
3582
3583 eventfd_signal(req->ctx->cq_ev_fd, 1);
3584 io_put_req(req);
3585}
3586
221c5eb2
JA
3587static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
3588 void *key)
3589{
e944475e 3590 struct io_poll_iocb *poll = wait->private;
221c5eb2
JA
3591 struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
3592 struct io_ring_ctx *ctx = req->ctx;
3593 __poll_t mask = key_to_poll(key);
221c5eb2
JA
3594
3595 /* for instances that support it check for an event match first: */
8c838788
JA
3596 if (mask && !(mask & poll->events))
3597 return 0;
221c5eb2 3598
392edb45 3599 list_del_init(&poll->wait.entry);
221c5eb2 3600
7c9e7f0f
JA
3601 /*
3602 * Run completion inline if we can. We're using trylock here because
3603 * we are violating the completion_lock -> poll wq lock ordering.
3604 * If we have a link timeout we're going to need the completion_lock
3605 * for finalizing the request, mark us as having grabbed that already.
3606 */
e94f141b
JA
3607 if (mask) {
3608 unsigned long flags;
221c5eb2 3609
e94f141b
JA
3610 if (llist_empty(&ctx->poll_llist) &&
3611 spin_trylock_irqsave(&ctx->completion_lock, flags)) {
f0b493e6
JA
3612 bool trigger_ev;
3613
e94f141b
JA
3614 hash_del(&req->hash_node);
3615 io_poll_complete(req, mask, 0);
e94f141b 3616
f0b493e6
JA
3617 trigger_ev = io_should_trigger_evfd(ctx);
3618 if (trigger_ev && eventfd_signal_count()) {
3619 trigger_ev = false;
3620 req->work.func = io_poll_trigger_evfd;
3621 } else {
3622 req->flags |= REQ_F_COMP_LOCKED;
3623 io_put_req(req);
3624 req = NULL;
3625 }
3626 spin_unlock_irqrestore(&ctx->completion_lock, flags);
3627 __io_cqring_ev_posted(ctx, trigger_ev);
e94f141b
JA
3628 } else {
3629 req->result = mask;
3630 req->llist_node.next = NULL;
3631 /* if the list wasn't empty, we're done */
3632 if (!llist_add(&req->llist_node, &ctx->poll_llist))
3633 req = NULL;
3634 else
3635 req->work.func = io_poll_flush;
3636 }
221c5eb2 3637 }
e94f141b
JA
3638 if (req)
3639 io_queue_async_work(req);
221c5eb2 3640
221c5eb2
JA
3641 return 1;
3642}
3643
3644struct io_poll_table {
3645 struct poll_table_struct pt;
3646 struct io_kiocb *req;
3647 int error;
3648};
3649
3650static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
3651 struct poll_table_struct *p)
3652{
3653 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
3654
3655 if (unlikely(pt->req->poll.head)) {
3656 pt->error = -EINVAL;
3657 return;
3658 }
3659
3660 pt->error = 0;
3661 pt->req->poll.head = head;
392edb45 3662 add_wait_queue(head, &pt->req->poll.wait);
221c5eb2
JA
3663}
3664
eac406c6
JA
3665static void io_poll_req_insert(struct io_kiocb *req)
3666{
3667 struct io_ring_ctx *ctx = req->ctx;
78076bb6
JA
3668 struct hlist_head *list;
3669
3670 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
3671 hlist_add_head(&req->hash_node, list);
eac406c6
JA
3672}
3673
3529d8c2 3674static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
3675{
3676 struct io_poll_iocb *poll = &req->poll;
221c5eb2 3677 u16 events;
221c5eb2
JA
3678
3679 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3680 return -EINVAL;
3681 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
3682 return -EINVAL;
09bb8394
JA
3683 if (!poll->file)
3684 return -EBADF;
221c5eb2 3685
221c5eb2
JA
3686 events = READ_ONCE(sqe->poll_events);
3687 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
0969e783
JA
3688 return 0;
3689}

static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt)
{
	struct io_poll_iocb *poll = &req->poll;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_poll_table ipt;
	bool cancel = false;
	__poll_t mask;

	INIT_IO_WORK(&req->work, io_poll_complete_work);
	INIT_HLIST_NODE(&req->hash_node);

	poll->head = NULL;
	poll->done = false;
	poll->canceled = false;

	ipt.pt._qproc = io_poll_queue_proc;
	ipt.pt._key = poll->events;
	ipt.req = req;
	ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */

	/* initialize the list so that we can do list_empty checks */
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
	poll->wait.private = poll;

	INIT_LIST_HEAD(&req->list);

	mask = vfs_poll(poll->file, &ipt.pt) & poll->events;

	spin_lock_irq(&ctx->completion_lock);
	if (likely(poll->head)) {
		spin_lock(&poll->head->lock);
		if (unlikely(list_empty(&poll->wait.entry))) {
			if (ipt.error)
				cancel = true;
			ipt.error = 0;
			mask = 0;
		}
		if (mask || ipt.error)
			list_del_init(&poll->wait.entry);
		else if (cancel)
			WRITE_ONCE(poll->canceled, true);
		else if (!poll->done) /* actually waiting for an event */
			io_poll_req_insert(req);
		spin_unlock(&poll->head->lock);
	}
	if (mask) { /* no async needed, we stole the completion */
		ipt.error = 0;
		io_poll_complete(req, mask, 0);
	}
	spin_unlock_irq(&ctx->completion_lock);

	if (mask) {
		io_cqring_ev_posted(ctx);
		io_put_req_find_next(req, nxt);
	}
	return ipt.error;
}
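
/*
 * For reference, a minimal userspace sequence that exercises the poll-add
 * path above. This is an illustrative liburing-style sketch (not part of
 * this file); names and signatures follow liburing at the time of writing:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	sqe->user_data = 0xcafe;
 *	io_uring_submit(&ring);
 *
 * If fd is already readable, vfs_poll() above returns a ready mask and the
 * completion is posted inline; otherwise the request parks on the file's
 * waitqueue and completes later from io_poll_wake().
 */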

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	atomic_inc(&ctx->cq_timeouts);

	spin_lock_irqsave(&ctx->completion_lock, flags);
	/*
	 * We could be racing with timeout deletion. If the list is empty,
	 * then timeout lookup already found it and will be handling it.
	 */
	if (!list_empty(&req->list)) {
		struct io_kiocb *prev;

		/*
		 * Adjust the sequence of the reqs queued before this one,
		 * since this timeout will consume a CQ ring slot and advance
		 * the cq_tail pointer; otherwise other timeout reqs could
		 * fire early, without enough completions having arrived for
		 * their wait_nr.
		 */
		prev = req;
		list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
			prev->sequence++;
		list_del_init(&req->list);
	}

	io_cqring_fill_event(req, -ETIME);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
	req_set_fail_links(req);
	io_put_req(req);
	return HRTIMER_NORESTART;
}

static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
{
	struct io_kiocb *req;
	int ret = -ENOENT;

	list_for_each_entry(req, &ctx->timeout_list, list) {
		if (user_data == req->user_data) {
			list_del_init(&req->list);
			ret = 0;
			break;
		}
	}

	if (ret == -ENOENT)
		return ret;

	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
	if (ret == -1)
		return -EALREADY;

	req_set_fail_links(req);
	io_cqring_fill_event(req, -ECANCELED);
	io_put_req(req);
	return 0;
}

static int io_timeout_remove_prep(struct io_kiocb *req,
				  const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
		return -EINVAL;

	req->timeout.addr = READ_ONCE(sqe->addr);
	req->timeout.flags = READ_ONCE(sqe->timeout_flags);
	if (req->timeout.flags)
		return -EINVAL;

	return 0;
}

/*
 * Remove or update an existing timeout command
 */
static int io_timeout_remove(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	spin_lock_irq(&ctx->completion_lock);
	ret = io_timeout_cancel(ctx, req->timeout.addr);

	io_cqring_fill_event(req, ret);
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
}
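
/*
 * Illustrative userspace pairing for the remove path above (a liburing-style
 * sketch, not part of this file): the addr field of the TIMEOUT_REMOVE sqe
 * carries the user_data of the timeout being cancelled.
 *
 *	io_uring_prep_timeout(sqe, &ts, 0, 0);
 *	sqe->user_data = 0x1234;
 *	...
 *	io_uring_prep_timeout_remove(sqe2, 0x1234, 0);
 *
 * On success the original timeout completes with -ECANCELED, and the remove
 * request itself completes with the result of io_timeout_cancel().
 */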

static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			   bool is_timeout_link)
{
	struct io_timeout_data *data;
	unsigned flags;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
		return -EINVAL;
	if (sqe->off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~IORING_TIMEOUT_ABS)
		return -EINVAL;

	req->timeout.count = READ_ONCE(sqe->off);

	if (!req->io && io_alloc_async_ctx(req))
		return -ENOMEM;

	data = &req->io->timeout;
	data->req = req;
	req->flags |= REQ_F_TIMEOUT;

	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	if (flags & IORING_TIMEOUT_ABS)
		data->mode = HRTIMER_MODE_ABS;
	else
		data->mode = HRTIMER_MODE_REL;

	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
	return 0;
}

static int io_timeout(struct io_kiocb *req)
{
	unsigned count;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data;
	struct list_head *entry;
	unsigned span = 0;

	data = &req->io->timeout;

	/*
	 * sqe->off holds how many events need to occur for this timeout
	 * event to be satisfied. If it isn't set, then this is a pure
	 * timeout request and sequence isn't used.
	 */
	count = req->timeout.count;
	if (!count) {
		req->flags |= REQ_F_TIMEOUT_NOSEQ;
		spin_lock_irq(&ctx->completion_lock);
		entry = ctx->timeout_list.prev;
		goto add;
	}

	req->sequence = ctx->cached_sq_head + count - 1;
	data->seq_offset = count;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	spin_lock_irq(&ctx->completion_lock);
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
		unsigned nxt_sq_head;
		long long tmp, tmp_nxt;
		u32 nxt_offset = nxt->io->timeout.seq_offset;

		if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
			continue;

		/*
		 * Since cached_sq_head + count - 1 can overflow, use type
		 * long long to store it.
		 */
		tmp = (long long)ctx->cached_sq_head + count - 1;
		nxt_sq_head = nxt->sequence - nxt_offset + 1;
		tmp_nxt = (long long)nxt_sq_head + nxt_offset - 1;

		/*
		 * cached_sq_head may overflow, but it can never overflow
		 * twice while there are still valid timeout reqs pending.
		 */
		if (ctx->cached_sq_head < nxt_sq_head)
			tmp += UINT_MAX;

		if (tmp > tmp_nxt)
			break;

		/*
		 * The sequence of the reqs after the insertion point, and of
		 * the inserted req itself, must be adjusted, because each
		 * timeout req consumes a slot.
		 */
		span++;
		nxt->sequence++;
	}
	req->sequence -= span;
add:
	list_add(&req->list, entry);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
	spin_unlock_irq(&ctx->completion_lock);
	return 0;
}
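
/*
 * Worked example for the sequence arithmetic above (illustrative numbers,
 * not derived from any particular workload): with cached_sq_head == 10 and
 * sqe->off == 5, req->sequence becomes 10 + 5 - 1 == 14, i.e. the timeout
 * fires once the SQE with sequence 14 has completed, unless the timer
 * expires first. If the 32-bit cached_sq_head has wrapped past a queued
 * timeout's nxt_sq_head, adding UINT_MAX to tmp restores a comparable
 * 64-bit value, since the head can wrap at most once while that timeout is
 * still pending.
 */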

static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	return req->user_data == (unsigned long) data;
}

static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;

	cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
				     struct io_kiocb *req, __u64 sqe_addr,
				     struct io_kiocb **nxt, int success_ret)
{
	unsigned long flags;
	int ret;

	ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
	if (ret != -ENOENT) {
		spin_lock_irqsave(&ctx->completion_lock, flags);
		goto done;
	}

	spin_lock_irqsave(&ctx->completion_lock, flags);
	ret = io_timeout_cancel(ctx, sqe_addr);
	if (ret != -ENOENT)
		goto done;
	ret = io_poll_cancel(ctx, sqe_addr);
done:
	if (!ret)
		ret = success_ret;
	io_cqring_fill_event(req, ret);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);

	if (ret < 0)
		req_set_fail_links(req);
	io_put_req_find_next(req, nxt);
}

static int io_async_cancel_prep(struct io_kiocb *req,
				const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
	    sqe->cancel_flags)
		return -EINVAL;

	req->cancel.addr = READ_ONCE(sqe->addr);
	return 0;
}

static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_async_find_and_cancel(ctx, req, req->cancel.addr, nxt, 0);
	return 0;
}
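
/*
 * For reference, an illustrative liburing-style cancellation sequence (a
 * sketch, not part of this file); the cancel sqe's addr carries the
 * user_data of the request to cancel:
 *
 *	io_uring_prep_cancel(sqe, (void *) 0xbeef, 0);
 *	io_uring_submit(&ring);
 *
 * The target completes with -ECANCELED if it was cancelled; the cancel
 * request itself gets 0, -ENOENT or -EALREADY, per the lookup chain in
 * io_async_find_and_cancel() (io-wq work, then timeouts, then poll
 * requests).
 */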

static int io_files_update_prep(struct io_kiocb *req,
				const struct io_uring_sqe *sqe)
{
	if (sqe->flags || sqe->ioprio || sqe->rw_flags)
		return -EINVAL;

	req->files_update.offset = READ_ONCE(sqe->off);
	req->files_update.nr_args = READ_ONCE(sqe->len);
	if (!req->files_update.nr_args)
		return -EINVAL;
	req->files_update.arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update(struct io_kiocb *req, bool force_nonblock)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_files_update up;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	up.offset = req->files_update.offset;
	up.fds = req->files_update.arg;

	mutex_lock(&ctx->uring_lock);
	ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
	mutex_unlock(&ctx->uring_lock);

	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}
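
/*
 * The same table can also be updated synchronously through the register
 * syscall. An illustrative liburing-style call (a sketch, not part of this
 * file), replacing two slots starting at offset 4 of a previously
 * registered file set:
 *
 *	int fds[2] = { new_fd0, new_fd1 };
 *
 *	io_uring_register_files_update(&ring, 4, fds, 2);
 *
 * IORING_OP_FILES_UPDATE is the async counterpart, which is why it returns
 * -EAGAIN under force_nonblock (punting to a worker) and takes uring_lock
 * above.
 */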

static int io_req_defer_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	ssize_t ret = 0;

	if (io_op_defs[req->opcode].file_table) {
		ret = io_grab_files(req);
		if (unlikely(ret))
			return ret;
	}

	io_req_work_grab_env(req, &io_op_defs[req->opcode]);

	switch (req->opcode) {
	case IORING_OP_NOP:
		break;
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		ret = io_read_prep(req, sqe, true);
		break;
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		ret = io_write_prep(req, sqe, true);
		break;
	case IORING_OP_POLL_ADD:
		ret = io_poll_add_prep(req, sqe);
		break;
	case IORING_OP_POLL_REMOVE:
		ret = io_poll_remove_prep(req, sqe);
		break;
	case IORING_OP_FSYNC:
		ret = io_prep_fsync(req, sqe);
		break;
	case IORING_OP_SYNC_FILE_RANGE:
		ret = io_prep_sfr(req, sqe);
		break;
	case IORING_OP_SENDMSG:
	case IORING_OP_SEND:
		ret = io_sendmsg_prep(req, sqe);
		break;
	case IORING_OP_RECVMSG:
	case IORING_OP_RECV:
		ret = io_recvmsg_prep(req, sqe);
		break;
	case IORING_OP_CONNECT:
		ret = io_connect_prep(req, sqe);
		break;
	case IORING_OP_TIMEOUT:
		ret = io_timeout_prep(req, sqe, false);
		break;
	case IORING_OP_TIMEOUT_REMOVE:
		ret = io_timeout_remove_prep(req, sqe);
		break;
	case IORING_OP_ASYNC_CANCEL:
		ret = io_async_cancel_prep(req, sqe);
		break;
	case IORING_OP_LINK_TIMEOUT:
		ret = io_timeout_prep(req, sqe, true);
		break;
	case IORING_OP_ACCEPT:
		ret = io_accept_prep(req, sqe);
		break;
	case IORING_OP_FALLOCATE:
		ret = io_fallocate_prep(req, sqe);
		break;
	case IORING_OP_OPENAT:
		ret = io_openat_prep(req, sqe);
		break;
	case IORING_OP_CLOSE:
		ret = io_close_prep(req, sqe);
		break;
	case IORING_OP_FILES_UPDATE:
		ret = io_files_update_prep(req, sqe);
		break;
	case IORING_OP_STATX:
		ret = io_statx_prep(req, sqe);
		break;
	case IORING_OP_FADVISE:
		ret = io_fadvise_prep(req, sqe);
		break;
	case IORING_OP_MADVISE:
		ret = io_madvise_prep(req, sqe);
		break;
	case IORING_OP_OPENAT2:
		ret = io_openat2_prep(req, sqe);
		break;
	case IORING_OP_EPOLL_CTL:
		ret = io_epoll_ctl_prep(req, sqe);
		break;
	default:
		printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
				req->opcode);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	/* Still need to defer if there are pending reqs in the defer list. */
	if (!req_need_defer(req) && list_empty(&ctx->defer_list))
		return 0;

	if (!req->io && io_alloc_async_ctx(req))
		return -EAGAIN;

	ret = io_req_defer_prep(req, sqe);
	if (ret < 0)
		return ret;

	spin_lock_irq(&ctx->completion_lock);
	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
		spin_unlock_irq(&ctx->completion_lock);
		return 0;
	}

	trace_io_uring_defer(ctx, req, req->user_data);
	list_add_tail(&req->list, &ctx->defer_list);
	spin_unlock_irq(&ctx->completion_lock);
	return -EIOCBQUEUED;
}

static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			struct io_kiocb **nxt, bool force_nonblock)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	switch (req->opcode) {
	case IORING_OP_NOP:
		ret = io_nop(req);
		break;
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		if (sqe) {
			ret = io_read_prep(req, sqe, force_nonblock);
			if (ret < 0)
				break;
		}
		ret = io_read(req, nxt, force_nonblock);
		break;
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		if (sqe) {
			ret = io_write_prep(req, sqe, force_nonblock);
			if (ret < 0)
				break;
		}
		ret = io_write(req, nxt, force_nonblock);
		break;
	case IORING_OP_FSYNC:
		if (sqe) {
			ret = io_prep_fsync(req, sqe);
			if (ret < 0)
				break;
		}
		ret = io_fsync(req, nxt, force_nonblock);
		break;
	case IORING_OP_POLL_ADD:
		if (sqe) {
			ret = io_poll_add_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_poll_add(req, nxt);
		break;
	case IORING_OP_POLL_REMOVE:
		if (sqe) {
			ret = io_poll_remove_prep(req, sqe);
			if (ret < 0)
				break;
		}
		ret = io_poll_remove(req);
		break;
	case IORING_OP_SYNC_FILE_RANGE:
		if (sqe) {
			ret = io_prep_sfr(req, sqe);
			if (ret < 0)
				break;
		}
		ret = io_sync_file_range(req, nxt, force_nonblock);
		break;
	case IORING_OP_SENDMSG:
	case IORING_OP_SEND:
		if (sqe) {
			ret = io_sendmsg_prep(req, sqe);
			if (ret < 0)
				break;
		}
		if (req->opcode == IORING_OP_SENDMSG)
			ret = io_sendmsg(req, nxt, force_nonblock);
		else
			ret = io_send(req, nxt, force_nonblock);
		break;
	case IORING_OP_RECVMSG:
	case IORING_OP_RECV:
		if (sqe) {
			ret = io_recvmsg_prep(req, sqe);
			if (ret)
				break;
		}
		if (req->opcode == IORING_OP_RECVMSG)
			ret = io_recvmsg(req, nxt, force_nonblock);
		else
			ret = io_recv(req, nxt, force_nonblock);
		break;
	case IORING_OP_TIMEOUT:
		if (sqe) {
			ret = io_timeout_prep(req, sqe, false);
			if (ret)
				break;
		}
		ret = io_timeout(req);
		break;
	case IORING_OP_TIMEOUT_REMOVE:
		if (sqe) {
			ret = io_timeout_remove_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_timeout_remove(req);
		break;
	case IORING_OP_ACCEPT:
		if (sqe) {
			ret = io_accept_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_accept(req, nxt, force_nonblock);
		break;
	case IORING_OP_CONNECT:
		if (sqe) {
			ret = io_connect_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_connect(req, nxt, force_nonblock);
		break;
	case IORING_OP_ASYNC_CANCEL:
		if (sqe) {
			ret = io_async_cancel_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_async_cancel(req, nxt);
		break;
	case IORING_OP_FALLOCATE:
		if (sqe) {
			ret = io_fallocate_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_fallocate(req, nxt, force_nonblock);
		break;
	case IORING_OP_OPENAT:
		if (sqe) {
			ret = io_openat_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_openat(req, nxt, force_nonblock);
		break;
	case IORING_OP_CLOSE:
		if (sqe) {
			ret = io_close_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_close(req, nxt, force_nonblock);
		break;
	case IORING_OP_FILES_UPDATE:
		if (sqe) {
			ret = io_files_update_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_files_update(req, force_nonblock);
		break;
	case IORING_OP_STATX:
		if (sqe) {
			ret = io_statx_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_statx(req, nxt, force_nonblock);
		break;
	case IORING_OP_FADVISE:
		if (sqe) {
			ret = io_fadvise_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_fadvise(req, nxt, force_nonblock);
		break;
	case IORING_OP_MADVISE:
		if (sqe) {
			ret = io_madvise_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_madvise(req, nxt, force_nonblock);
		break;
	case IORING_OP_OPENAT2:
		if (sqe) {
			ret = io_openat2_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_openat2(req, nxt, force_nonblock);
		break;
	case IORING_OP_EPOLL_CTL:
		if (sqe) {
			ret = io_epoll_ctl_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_epoll_ctl(req, nxt, force_nonblock);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		const bool in_async = io_wq_current_is_worker();

		if (req->result == -EAGAIN)
			return -EAGAIN;

		/* workqueue context doesn't hold uring_lock, grab it now */
		if (in_async)
			mutex_lock(&ctx->uring_lock);

		io_iopoll_req_issued(req);

		if (in_async)
			mutex_unlock(&ctx->uring_lock);
	}

	return 0;
}

static void io_wq_submit_work(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;
	int ret = 0;

	/* if NO_CANCEL is set, we must still run the work */
	if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
				IO_WQ_WORK_CANCEL) {
		ret = -ECANCELED;
	}

	if (!ret) {
		req->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
		req->in_async = true;
		do {
			ret = io_issue_sqe(req, NULL, &nxt, false);
			/*
			 * We can get EAGAIN for polled IO even though we're
			 * forcing a sync submission from here, since we can't
			 * wait for request slots on the block side.
			 */
			if (ret != -EAGAIN)
				break;
			cond_resched();
		} while (1);
	}

	/* drop submission reference */
	io_put_req(req);

	if (ret) {
		req_set_fail_links(req);
		io_cqring_add_event(req, ret);
		io_put_req(req);
	}

	/* if a dependent link is ready, pass it back */
	if (!ret && nxt)
		io_wq_assign_next(workptr, nxt);
}

static int io_req_needs_file(struct io_kiocb *req, int fd)
{
	if (!io_op_defs[req->opcode].needs_file)
		return 0;
	if (fd == -1 && io_op_defs[req->opcode].fd_non_neg)
		return 0;
	return 1;
}

static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
					      int index)
{
	struct fixed_file_table *table;

	table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
	return table->files[index & IORING_FILE_TABLE_MASK];
}
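
/*
 * Worked example of the two-level lookup above, assuming
 * IORING_FILE_TABLE_SHIFT is 9 (i.e. 512 slots per table): index 1000
 * resolves to table 1000 >> 9 == 1, slot 1000 & 511 == 488. The split
 * keeps each table a modest, separately allocated array rather than one
 * huge allocation.
 */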

static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
			   const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned flags;
	int fd;

	flags = READ_ONCE(sqe->flags);
	fd = READ_ONCE(sqe->fd);

	if (!io_req_needs_file(req, fd))
		return 0;

	if (flags & IOSQE_FIXED_FILE) {
		if (unlikely(!ctx->file_data ||
		    (unsigned) fd >= ctx->nr_user_files))
			return -EBADF;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		req->file = io_file_from_index(ctx, fd);
		if (!req->file)
			return -EBADF;
		req->flags |= REQ_F_FIXED_FILE;
		percpu_ref_get(&ctx->file_data->refs);
	} else {
		if (req->needs_fixed_file)
			return -EBADF;
		trace_io_uring_file_get(ctx, fd);
		req->file = io_file_get(state, fd);
		if (unlikely(!req->file))
			return -EBADF;
	}

	return 0;
}

static int io_grab_files(struct io_kiocb *req)
{
	int ret = -EBADF;
	struct io_ring_ctx *ctx = req->ctx;

	if (req->work.files)
		return 0;
	if (!ctx->ring_file)
		return -EBADF;

	rcu_read_lock();
	spin_lock_irq(&ctx->inflight_lock);
	/*
	 * We use the f_ops->flush() handler to ensure that we can flush
	 * out work accessing these files if the fd is closed. Check if
	 * the fd has changed since we started down this path, and disallow
	 * this operation if it has.
	 */
	if (fcheck(ctx->ring_fd) == ctx->ring_file) {
		list_add(&req->inflight_entry, &ctx->inflight_list);
		req->flags |= REQ_F_INFLIGHT;
		req->work.files = current->files;
		ret = 0;
	}
	spin_unlock_irq(&ctx->inflight_lock);
	rcu_read_unlock();

	return ret;
}

static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *prev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);

	/*
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
	if (!list_empty(&req->link_list)) {
		prev = list_entry(req->link_list.prev, struct io_kiocb,
				  link_list);
		if (refcount_inc_not_zero(&prev->refs)) {
			list_del_init(&req->link_list);
			prev->flags &= ~REQ_F_LINK_TIMEOUT;
		} else
			prev = NULL;
	}

	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	if (prev) {
		req_set_fail_links(prev);
		io_async_find_and_cancel(ctx, req, prev->user_data, NULL,
						-ETIME);
		io_put_req(prev);
	} else {
		io_cqring_add_event(req, -ETIME);
		io_put_req(req);
	}
	return HRTIMER_NORESTART;
}

static void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * If the list is now empty, then our linked request finished before
	 * we got a chance to set up the timer
	 */
	spin_lock_irq(&ctx->completion_lock);
	if (!list_empty(&req->link_list)) {
		struct io_timeout_data *data = &req->io->timeout;

		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
				data->mode);
	}
	spin_unlock_irq(&ctx->completion_lock);

	/* drop submission reference */
	io_put_req(req);
}

static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	if (!(req->flags & REQ_F_LINK))
		return NULL;

	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
					link_list);
	if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
		return NULL;

	req->flags |= REQ_F_LINK_TIMEOUT;
	return nxt;
}
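
/*
 * Illustrative liburing-style setup for the linked timeout handled above
 * (a sketch, not part of this file): the LINK_TIMEOUT sqe must directly
 * follow the request it guards, joined by IOSQE_IO_LINK.
 *
 *	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
 *	sqe->flags |= IOSQE_IO_LINK;
 *	io_uring_prep_link_timeout(sqe2, &ts, 0);
 *	io_uring_submit(&ring);
 *
 * If the timer fires before the read completes, the read is cancelled
 * (typically completing with -ECANCELED) and the timeout request itself
 * posts -ETIME; if the read finishes first, the pending timer is simply
 * cancelled.
 */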

static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_kiocb *linked_timeout;
	struct io_kiocb *nxt = NULL;
	int ret;

again:
	linked_timeout = io_prep_linked_timeout(req);

	ret = io_issue_sqe(req, sqe, &nxt, true);

	/*
	 * We async punt it if the file wasn't marked NOWAIT, or if the file
	 * doesn't support non-blocking read/write attempts
	 */
	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
	    (req->flags & REQ_F_MUST_PUNT))) {
punt:
		if (io_op_defs[req->opcode].file_table) {
			ret = io_grab_files(req);
			if (ret)
				goto err;
		}

		/*
		 * Queued up for async execution, worker will release
		 * submit reference when the iocb is actually submitted.
		 */
		io_queue_async_work(req);
		goto done_req;
	}

err:
	/* drop submission reference */
	io_put_req(req);

	if (linked_timeout) {
		if (!ret)
			io_queue_linked_timeout(linked_timeout);
		else
			io_put_req(linked_timeout);
	}

	/* and drop final reference, if we failed */
	if (ret) {
		io_cqring_add_event(req, ret);
		req_set_fail_links(req);
		io_put_req(req);
	}
done_req:
	if (nxt) {
		req = nxt;
		nxt = NULL;

		if (req->flags & REQ_F_FORCE_ASYNC)
			goto punt;
		goto again;
	}
}

static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret;

	ret = io_req_defer(req, sqe);
	if (ret) {
		if (ret != -EIOCBQUEUED) {
fail_req:
			io_cqring_add_event(req, ret);
			req_set_fail_links(req);
			io_double_put_req(req);
		}
	} else if (req->flags & REQ_F_FORCE_ASYNC) {
		ret = io_req_defer_prep(req, sqe);
		if (unlikely(ret < 0))
			goto fail_req;
		/*
		 * Never try inline submit if IOSQE_ASYNC is set, go straight
		 * to async execution.
		 */
		req->work.flags |= IO_WQ_WORK_CONCURRENT;
		io_queue_async_work(req);
	} else {
		__io_queue_sqe(req, sqe);
	}
}

static inline void io_queue_link_head(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
		io_cqring_add_event(req, -ECANCELED);
		io_double_put_req(req);
	} else
		io_queue_sqe(req, NULL);
}

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK|	\
				IOSQE_IO_HARDLINK | IOSQE_ASYNC)

static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			  struct io_submit_state *state, struct io_kiocb **link)
{
	const struct cred *old_creds = NULL;
	struct io_ring_ctx *ctx = req->ctx;
	unsigned int sqe_flags;
	int ret, id;

	sqe_flags = READ_ONCE(sqe->flags);

	/* enforce forwards compatibility on users */
	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
		ret = -EINVAL;
		goto err_req;
	}

	id = READ_ONCE(sqe->personality);
	if (id) {
		const struct cred *personality_creds;

		personality_creds = idr_find(&ctx->personality_idr, id);
		if (unlikely(!personality_creds)) {
			ret = -EINVAL;
			goto err_req;
		}
		old_creds = override_creds(personality_creds);
	}

	/* same numerical values with corresponding REQ_F_*, safe to copy */
	req->flags |= sqe_flags & (IOSQE_IO_DRAIN|IOSQE_IO_HARDLINK|
					IOSQE_ASYNC);

	ret = io_req_set_file(state, req, sqe);
	if (unlikely(ret)) {
err_req:
		io_cqring_add_event(req, ret);
		io_double_put_req(req);
		if (old_creds)
			revert_creds(old_creds);
		return false;
	}

	/*
	 * If we already have a head request, queue this one for async
	 * submittal once the head completes. If we don't have a head but
	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
	 * submitted sync once the chain is complete. If none of those
	 * conditions are true (normal request), then just queue it.
	 */
	if (*link) {
		struct io_kiocb *head = *link;

		/*
		 * Taking sequential execution of a link, draining both sides
		 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
		 * requests in the link. So, it drains the head and the
		 * next after the link request. The last one is done via
		 * drain_next flag to persist the effect across calls.
		 */
		if (sqe_flags & IOSQE_IO_DRAIN) {
			head->flags |= REQ_F_IO_DRAIN;
			ctx->drain_next = 1;
		}
		if (io_alloc_async_ctx(req)) {
			ret = -EAGAIN;
			goto err_req;
		}

		ret = io_req_defer_prep(req, sqe);
		if (ret) {
			/* fail even hard links since we don't submit */
			head->flags |= REQ_F_FAIL_LINK;
			goto err_req;
		}
		trace_io_uring_link(ctx, req, head);
		list_add_tail(&req->link_list, &head->link_list);

		/* last request of a link, enqueue the link */
		if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) {
			io_queue_link_head(head);
			*link = NULL;
		}
	} else {
		if (unlikely(ctx->drain_next)) {
			req->flags |= REQ_F_IO_DRAIN;
			req->ctx->drain_next = 0;
		}
		if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
			req->flags |= REQ_F_LINK;
			INIT_LIST_HEAD(&req->link_list);
			ret = io_req_defer_prep(req, sqe);
			if (ret)
				req->flags |= REQ_F_FAIL_LINK;
			*link = req;
		} else {
			io_queue_sqe(req, sqe);
		}
	}

	if (old_creds)
		revert_creds(old_creds);
	return true;
}
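
/*
 * Illustrative chain for the linking rules above (a sketch, not part of
 * this file): three sqes submitted in one batch,
 *
 *	sqe0: WRITEV,  flags = IOSQE_IO_LINK
 *	sqe1: FSYNC,   flags = IOSQE_IO_LINK
 *	sqe2: NOP,     flags = 0
 *
 * sqe0 becomes the link head; sqe1 and sqe2 are appended to its link_list,
 * and the arrival of sqe2 (no link flag) enqueues the whole chain via
 * io_queue_link_head(). Each request runs only after the previous one
 * completes successfully; with plain IOSQE_IO_LINK, a failure cancels the
 * remainder with -ECANCELED.
 */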

/*
 * Batched submission is done, ensure local IO is flushed out.
 */
static void io_submit_state_end(struct io_submit_state *state)
{
	blk_finish_plug(&state->plug);
	io_file_put(state);
	if (state->free_reqs)
		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
}

/*
 * Start submission side cache.
 */
static void io_submit_state_start(struct io_submit_state *state,
				  unsigned int max_ios)
{
	blk_start_plug(&state->plug);
	state->free_reqs = 0;
	state->file = NULL;
	state->ios_left = max_ios;
}

static void io_commit_sqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/*
	 * Ensure any loads from the SQEs are done at this point,
	 * since once we write the new head, the application could
	 * write new data to them.
	 */
	smp_store_release(&rings->sq.head, ctx->cached_sq_head);
}

/*
 * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
 * that is mapped by userspace. This means that care needs to be taken to
 * ensure that reads are stable, as we cannot rely on userspace always
 * being a good citizen. If members of the sqe are validated and then later
 * used, it's important that those reads are done through READ_ONCE() to
 * prevent a re-load down the line.
 */
static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
			  const struct io_uring_sqe **sqe_ptr)
{
	u32 *sq_array = ctx->sq_array;
	unsigned head;

	/*
	 * The cached sq head (or cq tail) serves two purposes:
	 *
	 * 1) allows us to batch the cost of updating the user visible
	 *    head updates.
	 * 2) allows the kernel side to track the head on its own, even
	 *    though the application is the one updating it.
	 */
	head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
	if (likely(head < ctx->sq_entries)) {
		/*
		 * All io requests need to record the previous position; for
		 * LINK vs DRAIN, it can be used to mark the position of the
		 * first IO in the link list.
		 */
		req->sequence = ctx->cached_sq_head;
		*sqe_ptr = &ctx->sq_sqes[head];
		req->opcode = READ_ONCE((*sqe_ptr)->opcode);
		req->user_data = READ_ONCE((*sqe_ptr)->user_data);
		ctx->cached_sq_head++;
		return true;
	}

	/* drop invalid entries */
	ctx->cached_sq_head++;
	ctx->cached_sq_dropped++;
	WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
	return false;
}
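
/*
 * Worked example of the index arithmetic above (illustrative numbers):
 * with sq_entries == 8 and sq_mask == 7, a cached_sq_head of 9 reads
 * sq_array[9 & 7] == sq_array[1]; the value stored there by userspace is
 * the actual SQE slot, which must be < sq_entries or the entry is dropped.
 */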

static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
			  struct file *ring_file, int ring_fd,
			  struct mm_struct **mm, bool async)
{
	struct io_submit_state state, *statep = NULL;
	struct io_kiocb *link = NULL;
	int i, submitted = 0;
	bool mm_fault = false;

	/* if we have a backlog and couldn't flush it all, return BUSY */
	if (test_bit(0, &ctx->sq_check_overflow)) {
		if (!list_empty(&ctx->cq_overflow_list) &&
		    !io_cqring_overflow_flush(ctx, false))
			return -EBUSY;
	}

	/* make sure SQ entry isn't read before tail */
	nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));

	if (!percpu_ref_tryget_many(&ctx->refs, nr))
		return -EAGAIN;

	if (nr > IO_PLUG_THRESHOLD) {
		io_submit_state_start(&state, nr);
		statep = &state;
	}

	ctx->ring_fd = ring_fd;
	ctx->ring_file = ring_file;

	for (i = 0; i < nr; i++) {
		const struct io_uring_sqe *sqe;
		struct io_kiocb *req;

		req = io_get_req(ctx, statep);
		if (unlikely(!req)) {
			if (!submitted)
				submitted = -EAGAIN;
			break;
		}
		if (!io_get_sqring(ctx, req, &sqe)) {
			__io_req_do_free(req);
			break;
		}

		/* will complete beyond this point, count as submitted */
		submitted++;

		if (unlikely(req->opcode >= IORING_OP_LAST)) {
			io_cqring_add_event(req, -EINVAL);
			io_double_put_req(req);
			break;
		}

		if (io_op_defs[req->opcode].needs_mm && !*mm) {
			mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
			if (!mm_fault) {
				use_mm(ctx->sqo_mm);
				*mm = ctx->sqo_mm;
			}
		}

		req->has_user = *mm != NULL;
		req->in_async = async;
		req->needs_fixed_file = async;
		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
						true, async);
		if (!io_submit_sqe(req, sqe, statep, &link))
			break;
	}

	if (unlikely(submitted != nr)) {
		int ref_used = (submitted == -EAGAIN) ? 0 : submitted;

		percpu_ref_put_many(&ctx->refs, nr - ref_used);
	}
	if (link)
		io_queue_link_head(link);
	if (statep)
		io_submit_state_end(&state);

	/* Commit SQ ring head once we've consumed and submitted all SQEs */
	io_commit_sqring(ctx);

	return submitted;
}

static int io_sq_thread(void *data)
{
	struct io_ring_ctx *ctx = data;
	struct mm_struct *cur_mm = NULL;
	const struct cred *old_cred;
	mm_segment_t old_fs;
	DEFINE_WAIT(wait);
	unsigned inflight;
	unsigned long timeout;
	int ret;

	complete(&ctx->completions[1]);

	old_fs = get_fs();
	set_fs(USER_DS);
	old_cred = override_creds(ctx->creds);

	ret = timeout = inflight = 0;
	while (!kthread_should_park()) {
		unsigned int to_submit;

		if (inflight) {
			unsigned nr_events = 0;

			if (ctx->flags & IORING_SETUP_IOPOLL) {
				/*
				 * inflight is the count of the maximum possible
				 * entries we submitted, but it can be smaller
				 * if we dropped some of them. If we don't have
				 * poll entries available, then we know that we
				 * have nothing left to poll for. Reset the
				 * inflight count to zero in that case.
				 */
				mutex_lock(&ctx->uring_lock);
				if (!list_empty(&ctx->poll_list))
					__io_iopoll_check(ctx, &nr_events, 0);
				else
					inflight = 0;
				mutex_unlock(&ctx->uring_lock);
			} else {
				/*
				 * Normal IO, just pretend everything completed.
				 * We don't have to poll completions for that.
				 */
				nr_events = inflight;
			}

			inflight -= nr_events;
			if (!inflight)
				timeout = jiffies + ctx->sq_thread_idle;
		}

		to_submit = io_sqring_entries(ctx);

		/*
		 * If submit got -EBUSY, flag us as needing the application
		 * to enter the kernel to reap and flush events.
		 */
		if (!to_submit || ret == -EBUSY) {
			/*
			 * We're polling. If we're within the defined idle
			 * period, then let us spin without work before going
			 * to sleep. The exception is if we got EBUSY doing
			 * more IO, we should wait for the application to
			 * reap events and wake us up.
			 */
			if (inflight ||
			    (!time_after(jiffies, timeout) && ret != -EBUSY &&
			    !percpu_ref_is_dying(&ctx->refs))) {
				cond_resched();
				continue;
			}

			/*
			 * Drop cur_mm before scheduling, we can't hold it for
			 * long periods (or over schedule()). Do this before
			 * adding ourselves to the waitqueue, as the unuse/drop
			 * may sleep.
			 */
			if (cur_mm) {
				unuse_mm(cur_mm);
				mmput(cur_mm);
				cur_mm = NULL;
			}

			prepare_to_wait(&ctx->sqo_wait, &wait,
						TASK_INTERRUPTIBLE);

			/* Tell userspace we may need a wakeup call */
			ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
			/* make sure to read SQ tail after writing flags */
			smp_mb();

			to_submit = io_sqring_entries(ctx);
			if (!to_submit || ret == -EBUSY) {
				if (kthread_should_park()) {
					finish_wait(&ctx->sqo_wait, &wait);
					break;
				}
				if (signal_pending(current))
					flush_signals(current);
				schedule();
				finish_wait(&ctx->sqo_wait, &wait);

				ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
				continue;
			}
			finish_wait(&ctx->sqo_wait, &wait);

			ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
		}

		mutex_lock(&ctx->uring_lock);
		ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
		mutex_unlock(&ctx->uring_lock);
		if (ret > 0)
			inflight += ret;
	}

	set_fs(old_fs);
	if (cur_mm) {
		unuse_mm(cur_mm);
		mmput(cur_mm);
	}
	revert_creds(old_cred);

	kthread_parkme();

	return 0;
}

struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned to_wait;
	unsigned nr_timeouts;
};

static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
{
	struct io_ring_ctx *ctx = iowq->ctx;

	/*
	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
			atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}

static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
			    int wake_flags, void *key)
{
	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
							wq);

	/* use noflush == true, as we can't safely rely on locking context */
	if (!io_should_wake(iowq, true))
		return -1;

	return autoremove_wake_function(curr, mode, wake_flags, key);
}

/*
 * Wait until events become available, if we don't already have some. The
 * application must reap them itself, as they reside on the shared cq ring.
 */
static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
			  const sigset_t __user *sig, size_t sigsz)
{
	struct io_wait_queue iowq = {
		.wq = {
			.private	= current,
			.func		= io_wake_function,
			.entry		= LIST_HEAD_INIT(iowq.wq.entry),
		},
		.ctx		= ctx,
		.to_wait	= min_events,
	};
	struct io_rings *rings = ctx->rings;
	int ret = 0;

	if (io_cqring_events(ctx, false) >= min_events)
		return 0;

	if (sig) {
#ifdef CONFIG_COMPAT
		if (in_compat_syscall())
			ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
						      sigsz);
		else
#endif
			ret = set_user_sigmask(sig, sigsz);

		if (ret)
			return ret;
	}

	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
	trace_io_uring_cqring_wait(ctx, min_events);
	do {
		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
						TASK_INTERRUPTIBLE);
		if (io_should_wake(&iowq, false))
			break;
		schedule();
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	} while (1);
	finish_wait(&ctx->wait, &iowq.wq);

	restore_saved_sigmask_unless(ret == -EINTR);

	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
}
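
/*
 * This is the sleep behind IORING_ENTER_GETEVENTS. An illustrative
 * userspace counterpart (a liburing-style sketch, not part of this file):
 *
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);	// enters io_cqring_wait() if empty
 *	io_uring_cqe_seen(&ring, cqe);
 *
 * The wait returns early on signals (-EINTR) or when a timeout request
 * completes, regardless of how many events have arrived.
 */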

static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		struct sock *sock = ctx->ring_sock->sk;
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
			kfree_skb(skb);
	}
#else
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file;

		file = io_file_from_index(ctx, i);
		if (file)
			fput(file);
	}
#endif
}

static void io_file_ref_kill(struct percpu_ref *ref)
{
	struct fixed_file_data *data;

	data = container_of(ref, struct fixed_file_data, refs);
	complete(&data->done);
}

static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	struct fixed_file_data *data = ctx->file_data;
	unsigned nr_tables, i;

	if (!data)
		return -ENXIO;

	percpu_ref_kill_and_confirm(&data->refs, io_file_ref_kill);
	flush_work(&data->ref_work);
	wait_for_completion(&data->done);
	io_ring_file_ref_flush(data);
	percpu_ref_exit(&data->refs);

	__io_sqe_files_unregister(ctx);
	nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
	for (i = 0; i < nr_tables; i++)
		kfree(data->table[i].files);
	kfree(data->table);
	kfree(data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
	return 0;
}

static void io_sq_thread_stop(struct io_ring_ctx *ctx)
{
	if (ctx->sqo_thread) {
		wait_for_completion(&ctx->completions[1]);
		/*
		 * The park is a bit of a work-around, without it we get
		 * warning spews on shutdown with SQPOLL set and affinity
		 * set to a single CPU.
		 */
		kthread_park(ctx->sqo_thread);
		kthread_stop(ctx->sqo_thread);
		ctx->sqo_thread = NULL;
	}
}

static void io_finish_async(struct io_ring_ctx *ctx)
{
	io_sq_thread_stop(ctx);

	if (ctx->io_wq) {
		io_wq_destroy(ctx->io_wq);
		ctx->io_wq = NULL;
	}
}

#if defined(CONFIG_UNIX)
/*
 * Ensure the UNIX gc is aware of our file set, so we are certain that
 * the io_uring can be safely unregistered on process exit, even if we have
 * loops in the file referencing.
 */
static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
{
	struct sock *sk = ctx->ring_sock->sk;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;
	int i, nr_files;

	if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
		unsigned long inflight = ctx->user->unix_inflight + nr;

		if (inflight > task_rlimit(current, RLIMIT_NOFILE))
			return -EMFILE;
	}

	fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
	if (!fpl)
		return -ENOMEM;

	skb = alloc_skb(0, GFP_KERNEL);
	if (!skb) {
		kfree(fpl);
		return -ENOMEM;
	}

	skb->sk = sk;

	nr_files = 0;
	fpl->user = get_uid(ctx->user);
	for (i = 0; i < nr; i++) {
		struct file *file = io_file_from_index(ctx, i + offset);

		if (!file)
			continue;
		fpl->fp[nr_files] = get_file(file);
		unix_inflight(fpl->user, fpl->fp[nr_files]);
		nr_files++;
	}

	if (nr_files) {
		fpl->max = SCM_MAX_FD;
		fpl->count = nr_files;
		UNIXCB(skb).fp = fpl;
		skb->destructor = unix_destruct_scm;
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
		skb_queue_head(&sk->sk_receive_queue, skb);

		for (i = 0; i < nr_files; i++)
			fput(fpl->fp[i]);
	} else {
		kfree_skb(skb);
		kfree(fpl);
	}

	return 0;
}

/*
 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
 * causes regular reference counting to break down. We rely on the UNIX
 * garbage collection to take care of this problem for us.
 */
static int io_sqe_files_scm(struct io_ring_ctx *ctx)
{
	unsigned left, total;
	int ret = 0;

	total = 0;
	left = ctx->nr_user_files;
	while (left) {
		unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);

		ret = __io_sqe_files_scm(ctx, this_files, total);
		if (ret)
			break;
		left -= this_files;
		total += this_files;
	}

	if (!ret)
		return 0;

	while (total < ctx->nr_user_files) {
		struct file *file = io_file_from_index(ctx, total);

		if (file)
			fput(file);
		total++;
	}

	return ret;
}
#else
static int io_sqe_files_scm(struct io_ring_ctx *ctx)
{
	return 0;
}
#endif
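
/*
 * Worked example for the batching above (illustrative numbers, assuming
 * SCM_MAX_FD is 253): registering 600 files queues three SCM_RIGHTS skbs
 * holding 253, 253 and 94 fds respectively, each batch starting at the
 * offset where the previous one ended.
 */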

static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
				    unsigned nr_files)
{
	int i;

	for (i = 0; i < nr_tables; i++) {
		struct fixed_file_table *table = &ctx->file_data->table[i];
		unsigned this_files;

		this_files = min(nr_files, IORING_MAX_FILES_TABLE);
		table->files = kcalloc(this_files, sizeof(struct file *),
					GFP_KERNEL);
		if (!table->files)
			break;
		nr_files -= this_files;
	}

	if (i == nr_tables)
		return 0;

	for (i = 0; i < nr_tables; i++) {
		struct fixed_file_table *table = &ctx->file_data->table[i];

		kfree(table->files);
	}
	return 1;
}
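
/*
 * Worked example of the sizing above (illustrative numbers, assuming
 * IORING_MAX_FILES_TABLE is 512): nr_args == 1200 gives
 * DIV_ROUND_UP(1200, 512) == 3 tables, allocated as 512, 512 and 176
 * slots. The failure path walks all nr_tables entries; kfree(NULL) is a
 * no-op for tables that were never allocated.
 */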

static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
						left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#else
	fput(file);
#endif
}

struct io_file_put {
	struct llist_node llist;
	struct file *file;
	struct completion *done;
};

static void io_ring_file_ref_flush(struct fixed_file_data *data)
{
	struct io_file_put *pfile, *tmp;
	struct llist_node *node;

	while ((node = llist_del_all(&data->put_llist)) != NULL) {
		llist_for_each_entry_safe(pfile, tmp, node, llist) {
			io_ring_file_put(data->ctx, pfile->file);
			if (pfile->done)
				complete(pfile->done);
			else
				kfree(pfile);
		}
	}
}

static void io_ring_file_ref_switch(struct work_struct *work)
{
	struct fixed_file_data *data;

	data = container_of(work, struct fixed_file_data, ref_work);
	io_ring_file_ref_flush(data);
	percpu_ref_get(&data->refs);
	percpu_ref_switch_to_percpu(&data->refs);
}

static void io_file_data_ref_zero(struct percpu_ref *ref)
{
	struct fixed_file_data *data;

	data = container_of(ref, struct fixed_file_data, refs);

	/*
	 * We can't safely switch from inside this context, punt to wq. If
	 * the table ref is going away, the table is being unregistered.
	 * Don't queue up the async work for that case, the caller will
	 * handle it.
	 */
	if (!percpu_ref_is_dying(&data->refs))
		queue_work(system_wq, &data->ref_work);
}
5547
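/*
 * Sketch of the fixed-file ref lifecycle as wired up above: the table ref
 * normally runs in percpu mode. io_queue_file_removal() (or the update
 * path) drops a reference and switches it to atomic mode; when the count
 * hits zero this callback punts to ref_work, which flushes the put list,
 * re-takes a reference and switches the ref back to percpu mode for the
 * fast path.
 */
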
static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
				 unsigned nr_args)
{
	__s32 __user *fds = (__s32 __user *) arg;
	unsigned nr_tables;
	struct file *file;
	int fd, ret = 0;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;

	ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
	if (!ctx->file_data)
		return -ENOMEM;
	ctx->file_data->ctx = ctx;
	init_completion(&ctx->file_data->done);

	nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
	ctx->file_data->table = kcalloc(nr_tables,
					sizeof(struct fixed_file_table),
					GFP_KERNEL);
	if (!ctx->file_data->table) {
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	if (percpu_ref_init(&ctx->file_data->refs, io_file_data_ref_zero,
				PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
		kfree(ctx->file_data->table);
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}
	ctx->file_data->put_llist.first = NULL;
	INIT_WORK(&ctx->file_data->ref_work, io_ring_file_ref_switch);

	if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
		percpu_ref_exit(&ctx->file_data->refs);
		kfree(ctx->file_data->table);
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct fixed_file_table *table;
		unsigned index;

		ret = -EFAULT;
		if (copy_from_user(&fd, &fds[i], sizeof(fd)))
			break;
		/* allow sparse sets */
		if (fd == -1) {
			ret = 0;
			continue;
		}

		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
		index = i & IORING_FILE_TABLE_MASK;
		file = fget(fd);

		ret = -EBADF;
		if (!file)
			break;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (file->f_op == &io_uring_fops) {
			fput(file);
			break;
		}
		ret = 0;
		table->files[index] = file;
	}

	if (ret) {
		for (i = 0; i < ctx->nr_user_files; i++) {
			file = io_file_from_index(ctx, i);
			if (file)
				fput(file);
		}
		for (i = 0; i < nr_tables; i++)
			kfree(ctx->file_data->table[i].files);

		kfree(ctx->file_data->table);
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		ctx->nr_user_files = 0;
		return ret;
	}

	ret = io_sqe_files_scm(ctx);
	if (ret)
		io_sqe_files_unregister(ctx);

	return ret;
}

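/*
 * Userspace view (a hedged sketch, not part of this file): fixed files are
 * registered through io_uring_register(2) with IORING_REGISTER_FILES and
 * an array of __s32 descriptors, where -1 leaves a sparse slot:
 *
 *	__s32 fds[4] = { sock0, sock1, -1, -1 };
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES,
 *		fds, 4);
 */
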
static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
				int index)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sock->sk_receive_queue;
	struct sk_buff *skb;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb) {
		struct scm_fp_list *fpl = UNIXCB(skb).fp;

		if (fpl->count < SCM_MAX_FD) {
			__skb_unlink(skb, head);
			spin_unlock_irq(&head->lock);
			fpl->fp[fpl->count] = get_file(file);
			unix_inflight(fpl->user, fpl->fp[fpl->count]);
			fpl->count++;
			spin_lock_irq(&head->lock);
			__skb_queue_head(head, skb);
		} else {
			skb = NULL;
		}
	}
	spin_unlock_irq(&head->lock);

	if (skb) {
		fput(file);
		return 0;
	}

	return __io_sqe_files_scm(ctx, 1, index);
#else
	return 0;
#endif
}

static void io_atomic_switch(struct percpu_ref *ref)
{
	struct fixed_file_data *data;

	data = container_of(ref, struct fixed_file_data, refs);
	clear_bit(FFD_F_ATOMIC, &data->state);
}

static bool io_queue_file_removal(struct fixed_file_data *data,
				  struct file *file)
{
	struct io_file_put *pfile, pfile_stack;
	DECLARE_COMPLETION_ONSTACK(done);

	/*
	 * If we fail allocating the struct we need for doing async removal
	 * of this file, just punt to sync and wait for it.
	 */
	pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
	if (!pfile) {
		pfile = &pfile_stack;
		pfile->done = &done;
	}

	pfile->file = file;
	llist_add(&pfile->llist, &data->put_llist);

	if (pfile == &pfile_stack) {
		if (!test_and_set_bit(FFD_F_ATOMIC, &data->state)) {
			percpu_ref_put(&data->refs);
			percpu_ref_switch_to_atomic(&data->refs,
							io_atomic_switch);
		}
		wait_for_completion(&done);
		flush_work(&data->ref_work);
		return false;
	}

	return true;
}

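/*
 * Return value recap: true means the put was queued asynchronously and the
 * caller still owes a percpu_ref_switch_to_atomic() to drive the ref to
 * zero (see __io_sqe_files_update() below); false means the allocation
 * failed, the switch was done here and the removal already completed
 * synchronously.
 */
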
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_files_update *up,
				 unsigned nr_args)
{
	struct fixed_file_data *data = ctx->file_data;
	bool ref_switch = false;
	struct file *file;
	__s32 __user *fds;
	int fd, i, err;
	__u32 done;

	if (check_add_overflow(up->offset, nr_args, &done))
		return -EOVERFLOW;
	if (done > ctx->nr_user_files)
		return -EINVAL;

	done = 0;
	fds = u64_to_user_ptr(up->fds);
	while (nr_args) {
		struct fixed_file_table *table;
		unsigned index;

		err = 0;
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		i = array_index_nospec(up->offset, ctx->nr_user_files);
		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
		index = i & IORING_FILE_TABLE_MASK;
		if (table->files[index]) {
			file = io_file_from_index(ctx, index);
			table->files[index] = NULL;
			if (io_queue_file_removal(data, file))
				ref_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (file->f_op == &io_uring_fops) {
				fput(file);
				err = -EBADF;
				break;
			}
			table->files[index] = file;
			err = io_sqe_file_register(ctx, file, i);
			if (err)
				break;
		}
		nr_args--;
		done++;
		up->offset++;
	}

	if (ref_switch && !test_and_set_bit(FFD_F_ATOMIC, &data->state)) {
		percpu_ref_put(&data->refs);
		percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);
	}

	return done ? done : err;
}

static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
			       unsigned nr_args)
{
	struct io_uring_files_update up;

	if (!ctx->file_data)
		return -ENXIO;
	if (!nr_args)
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (up.resv)
		return -EINVAL;

	return __io_sqe_files_update(ctx, &up, nr_args);
}

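/*
 * Userspace view (a hedged sketch): IORING_REGISTER_FILES_UPDATE takes a
 * struct io_uring_files_update whose fds field points at an array of
 * __s32 descriptors; -1 removes the file at that slot, any other value
 * replaces it:
 *
 *	struct io_uring_files_update up = {
 *		.offset = 2,
 *		.fds = (__u64) (unsigned long) new_fds,
 *	};
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES_UPDATE, &up, nr);
 */
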
static void io_put_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	io_put_req(req);
}

static void io_get_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	refcount_inc(&req->refs);
}

static int io_init_wq_offload(struct io_ring_ctx *ctx,
			      struct io_uring_params *p)
{
	struct io_wq_data data;
	struct fd f;
	struct io_ring_ctx *ctx_attach;
	unsigned int concurrency;
	int ret = 0;

	data.user = ctx->user;
	data.get_work = io_get_work;
	data.put_work = io_put_work;

	if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
		/* Do QD, or 4 * CPUS, whatever is smallest */
		concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

		ctx->io_wq = io_wq_create(concurrency, &data);
		if (IS_ERR(ctx->io_wq)) {
			ret = PTR_ERR(ctx->io_wq);
			ctx->io_wq = NULL;
		}
		return ret;
	}

	f = fdget(p->wq_fd);
	if (!f.file)
		return -EBADF;

	if (f.file->f_op != &io_uring_fops) {
		ret = -EINVAL;
		goto out_fput;
	}

	ctx_attach = f.file->private_data;
	/* @io_wq is protected by holding the fd */
	if (!io_wq_get(ctx_attach->io_wq, &data)) {
		ret = -EINVAL;
		goto out_fput;
	}

	ctx->io_wq = ctx_attach->io_wq;
out_fput:
	fdput(f);
	return ret;
}

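/*
 * Illustrative use of the attach path above (hedged sketch): a second ring
 * can share the io-wq backend of an existing one instead of spawning its
 * own worker pool:
 *
 *	struct io_uring_params p = {
 *		.flags = IORING_SETUP_ATTACH_WQ,
 *		.wq_fd = existing_ring_fd,
 *	};
 *	new_ring_fd = io_uring_setup(entries, &p);
 */
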
static int io_sq_offload_start(struct io_ring_ctx *ctx,
			       struct io_uring_params *p)
{
	int ret;

	init_waitqueue_head(&ctx->sqo_wait);
	mmgrab(current->mm);
	ctx->sqo_mm = current->mm;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto err;

		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids)
				goto err;
			if (!cpu_online(cpu))
				goto err;

			ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
							ctx, cpu,
							"io_uring-sq");
		} else {
			ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
							 "io_uring-sq");
		}
		if (IS_ERR(ctx->sqo_thread)) {
			ret = PTR_ERR(ctx->sqo_thread);
			ctx->sqo_thread = NULL;
			goto err;
		}
		wake_up_process(ctx->sqo_thread);
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	ret = io_init_wq_offload(ctx, p);
	if (ret)
		goto err;

	return 0;
err:
	io_finish_async(ctx);
	mmdrop(ctx->sqo_mm);
	ctx->sqo_mm = NULL;
	return ret;
}

static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	do {
		cur_pages = atomic_long_read(&user->locked_vm);
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
					new_pages) != cur_pages);

	return 0;
}

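/*
 * Worked example (values assumed): with a RLIMIT_MEMLOCK of 64 KiB and
 * 4 KiB pages, page_limit is 16 pages. The cmpxchg loop above charges
 * nr_pages atomically without taking a lock: if another task changed
 * user->locked_vm between the read and the cmpxchg, the loop simply
 * re-reads and re-checks against the limit.
 */
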
static void io_mem_free(void *ptr)
{
	struct page *page;

	if (!ptr)
		return;

	page = virt_to_head_page(ptr);
	if (put_page_testzero(page))
		free_compound_page(page);
}

static void *io_mem_alloc(size_t size)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
				__GFP_NORETRY;

	return (void *) __get_free_pages(gfp_flags, get_order(size));
}

static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
				size_t *sq_offset)
{
	struct io_rings *rings;
	size_t off, sq_array_size;

	off = struct_size(rings, cqes, cq_entries);
	if (off == SIZE_MAX)
		return SIZE_MAX;

#ifdef CONFIG_SMP
	off = ALIGN(off, SMP_CACHE_BYTES);
	if (off == 0)
		return SIZE_MAX;
#endif

	sq_array_size = array_size(sizeof(u32), sq_entries);
	if (sq_array_size == SIZE_MAX)
		return SIZE_MAX;

	if (check_add_overflow(off, sq_array_size, &off))
		return SIZE_MAX;

	if (sq_offset)
		*sq_offset = off;

	return off;
}

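/*
 * Layout recap for the size computed above: a single allocation holds the
 * struct io_rings header plus the CQE array (the struct_size() term),
 * padding to SMP_CACHE_BYTES on SMP, then the u32 SQ index array. The
 * offset of that array is what io_uring_create() later reports back to
 * userspace in p->sq_off.array.
 */
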
static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
{
	size_t pages;

	pages = (size_t)1 << get_order(
		rings_size(sq_entries, cq_entries, NULL));
	pages += (size_t)1 << get_order(
		array_size(sizeof(struct io_uring_sqe), sq_entries));

	return pages;
}

static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
{
	int i, j;

	if (!ctx->user_bufs)
		return -ENXIO;

	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++)
			unpin_user_page(imu->bvec[j].bv_page);

		if (ctx->account_mem)
			io_unaccount_mem(ctx->user, imu->nr_bvecs);
		kvfree(imu->bvec);
		imu->nr_bvecs = 0;
	}

	kfree(ctx->user_bufs);
	ctx->user_bufs = NULL;
	ctx->nr_user_bufs = 0;
	return 0;
}

static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
				  unsigned nr_args)
{
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, j, got_pages = 0;
	int ret = -EINVAL;

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > UIO_MAXIOV)
		return -EINVAL;

	ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
					GFP_KERNEL);
	if (!ctx->user_bufs)
		return -ENOMEM;

	for (i = 0; i < nr_args; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
		unsigned long off, start, end, ubuf;
		int pret, nr_pages;
		struct iovec iov;
		size_t size;

		ret = io_copy_iov(ctx, &iov, arg, i);
		if (ret)
			goto err;

		/*
		 * Don't impose further limits on the size and buffer
		 * constraints here, we'll -EINVAL later when IO is
		 * submitted if they are wrong.
		 */
		ret = -EFAULT;
		if (!iov.iov_base || !iov.iov_len)
			goto err;

		/* arbitrary limit, but we need something */
		if (iov.iov_len > SZ_1G)
			goto err;

		ubuf = (unsigned long) iov.iov_base;
		end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		start = ubuf >> PAGE_SHIFT;
		nr_pages = end - start;

		if (ctx->account_mem) {
			ret = io_account_mem(ctx->user, nr_pages);
			if (ret)
				goto err;
		}

		ret = 0;
		if (!pages || nr_pages > got_pages) {
			kfree(vmas);
			kfree(pages);
			pages = kvmalloc_array(nr_pages, sizeof(struct page *),
						GFP_KERNEL);
			vmas = kvmalloc_array(nr_pages,
					sizeof(struct vm_area_struct *),
					GFP_KERNEL);
			if (!pages || !vmas) {
				ret = -ENOMEM;
				if (ctx->account_mem)
					io_unaccount_mem(ctx->user, nr_pages);
				goto err;
			}
			got_pages = nr_pages;
		}

		imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
						GFP_KERNEL);
		ret = -ENOMEM;
		if (!imu->bvec) {
			if (ctx->account_mem)
				io_unaccount_mem(ctx->user, nr_pages);
			goto err;
		}

		ret = 0;
		down_read(&current->mm->mmap_sem);
		pret = pin_user_pages(ubuf, nr_pages,
					FOLL_WRITE | FOLL_LONGTERM,
					pages, vmas);
		if (pret == nr_pages) {
			/* don't support file backed memory */
			for (j = 0; j < nr_pages; j++) {
				struct vm_area_struct *vma = vmas[j];

				if (vma->vm_file &&
				    !is_file_hugepages(vma->vm_file)) {
					ret = -EOPNOTSUPP;
					break;
				}
			}
		} else {
			ret = pret < 0 ? pret : -EFAULT;
		}
		up_read(&current->mm->mmap_sem);
		if (ret) {
			/*
			 * if we did partial map, or found file backed vmas,
			 * release any pages we did get
			 */
			if (pret > 0)
				unpin_user_pages(pages, pret);
			if (ctx->account_mem)
				io_unaccount_mem(ctx->user, nr_pages);
			kvfree(imu->bvec);
			goto err;
		}

		off = ubuf & ~PAGE_MASK;
		size = iov.iov_len;
		for (j = 0; j < nr_pages; j++) {
			size_t vec_len;

			vec_len = min_t(size_t, size, PAGE_SIZE - off);
			imu->bvec[j].bv_page = pages[j];
			imu->bvec[j].bv_len = vec_len;
			imu->bvec[j].bv_offset = off;
			off = 0;
			size -= vec_len;
		}
		/* store original address for later verification */
		imu->ubuf = ubuf;
		imu->len = iov.iov_len;
		imu->nr_bvecs = nr_pages;

		ctx->nr_user_bufs++;
	}
	kvfree(pages);
	kvfree(vmas);
	return 0;
err:
	kvfree(pages);
	kvfree(vmas);
	io_sqe_buffer_unregister(ctx);
	return ret;
}

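/*
 * Userspace view (a hedged sketch): IORING_REGISTER_BUFFERS takes an array
 * of struct iovec describing the buffers to pin; per the checks above the
 * memory must be anonymous or hugetlb-backed:
 *
 *	void *buf;
 *	posix_memalign(&buf, 4096, 65536);
 *	struct iovec iov = { .iov_base = buf, .iov_len = 65536 };
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_BUFFERS,
 *		&iov, 1);
 */
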
static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
{
	__s32 __user *fds = arg;
	int fd;

	if (ctx->cq_ev_fd)
		return -EBUSY;

	if (copy_from_user(&fd, fds, sizeof(*fds)))
		return -EFAULT;

	ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
	if (IS_ERR(ctx->cq_ev_fd)) {
		int ret = PTR_ERR(ctx->cq_ev_fd);

		ctx->cq_ev_fd = NULL;
		return ret;
	}

	return 0;
}

static int io_eventfd_unregister(struct io_ring_ctx *ctx)
{
	if (ctx->cq_ev_fd) {
		eventfd_ctx_put(ctx->cq_ev_fd);
		ctx->cq_ev_fd = NULL;
		return 0;
	}

	return -ENXIO;
}

static void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
	io_finish_async(ctx);
	if (ctx->sqo_mm)
		mmdrop(ctx->sqo_mm);

	io_iopoll_reap_events(ctx);
	io_sqe_buffer_unregister(ctx);
	io_sqe_files_unregister(ctx);
	io_eventfd_unregister(ctx);

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		ctx->ring_sock->file = NULL; /* so that iput() is called */
		sock_release(ctx->ring_sock);
	}
#endif

	io_mem_free(ctx->rings);
	io_mem_free(ctx->sq_sqes);

	percpu_ref_exit(&ctx->refs);
	if (ctx->account_mem)
		io_unaccount_mem(ctx->user,
				ring_pages(ctx->sq_entries, ctx->cq_entries));
	free_uid(ctx->user);
	put_cred(ctx->creds);
	kfree(ctx->completions);
	kfree(ctx->cancel_hash);
	kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx);
}

static __poll_t io_uring_poll(struct file *file, poll_table *wait)
{
	struct io_ring_ctx *ctx = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &ctx->cq_wait, wait);
	/*
	 * synchronizes with barrier from wq_has_sleeper call in
	 * io_commit_cqring
	 */
	smp_rmb();
	if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
	    ctx->rings->sq_ring_entries)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

static int io_uring_fasync(int fd, struct file *file, int on)
{
	struct io_ring_ctx *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->cq_fasync);
}

static int io_remove_personalities(int id, void *p, void *data)
{
	struct io_ring_ctx *ctx = data;
	const struct cred *cred;

	cred = idr_remove(&ctx->personality_idr, id);
	if (cred)
		put_cred(cred);
	return 0;
}

static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
	percpu_ref_kill(&ctx->refs);
	mutex_unlock(&ctx->uring_lock);

	/*
	 * Wait for sq thread to idle, if we have one. It won't spin on new
	 * work after we've killed the ctx ref above. This is important to do
	 * before we cancel existing commands, as the thread could otherwise
	 * be queueing new work post that. If that's work we need to cancel,
	 * it could cause shutdown to hang.
	 */
	while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
		cpu_relax();

	io_kill_timeouts(ctx);
	io_poll_remove_all(ctx);

	if (ctx->io_wq)
		io_wq_cancel_all(ctx->io_wq);

	io_iopoll_reap_events(ctx);
	/* if we failed setting up the ctx, we might not have any rings */
	if (ctx->rings)
		io_cqring_overflow_flush(ctx, true);
	idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
	wait_for_completion(&ctx->completions[0]);
	io_ring_ctx_free(ctx);
}

static int io_uring_release(struct inode *inode, struct file *file)
{
	struct io_ring_ctx *ctx = file->private_data;

	file->private_data = NULL;
	io_ring_ctx_wait_and_kill(ctx);
	return 0;
}

static void io_uring_cancel_files(struct io_ring_ctx *ctx,
				  struct files_struct *files)
{
	struct io_kiocb *req;
	DEFINE_WAIT(wait);

	while (!list_empty_careful(&ctx->inflight_list)) {
		struct io_kiocb *cancel_req = NULL;

		spin_lock_irq(&ctx->inflight_lock);
		list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
			if (req->work.files != files)
				continue;
			/* req is being completed, ignore */
			if (!refcount_inc_not_zero(&req->refs))
				continue;
			cancel_req = req;
			break;
		}
		if (cancel_req)
			prepare_to_wait(&ctx->inflight_wait, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ctx->inflight_lock);

		/* We need to keep going until we don't find a matching req */
		if (!cancel_req)
			break;

		io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
		io_put_req(cancel_req);
		schedule();
	}
	finish_wait(&ctx->inflight_wait, &wait);
}

static int io_uring_flush(struct file *file, void *data)
{
	struct io_ring_ctx *ctx = file->private_data;

	io_uring_cancel_files(ctx, data);
	return 0;
}

static void *io_uring_validate_mmap_request(struct file *file,
					    loff_t pgoff, size_t sz)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t offset = pgoff << PAGE_SHIFT;
	struct page *page;
	void *ptr;

	switch (offset) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		ptr = ctx->rings;
		break;
	case IORING_OFF_SQES:
		ptr = ctx->sq_sqes;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	page = virt_to_head_page(ptr);
	if (sz > page_size(page))
		return ERR_PTR(-EINVAL);

	return ptr;
}

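/*
 * Userspace view (a hedged sketch): the offsets validated above are the
 * mmap "keys" for the shared rings, e.g. for the SQ ring:
 *
 *	sq_ptr = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *		      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		      ring_fd, IORING_OFF_SQ_RING);
 *
 * with IORING_OFF_SQES mapping the SQE array separately.
 */
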
#ifdef CONFIG_MMU

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t sz = vma->vm_end - vma->vm_start;
	unsigned long pfn;
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}

#else /* !CONFIG_MMU */

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
}

static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len,
	unsigned long pgoff, unsigned long flags)
{
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, pgoff, len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return (unsigned long) ptr;
}

#endif /* !CONFIG_MMU */

SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
		u32, min_complete, u32, flags, const sigset_t __user *, sig,
		size_t, sigsz)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	int submitted = 0;
	struct fd f;

	if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
		return -EINVAL;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ret = -ENXIO;
	ctx = f.file->private_data;
	if (!percpu_ref_tryget(&ctx->refs))
		goto out_fput;

	/*
	 * For SQ polling, the thread will do all submissions and completions.
	 * Just return the requested submit count, and wake the thread if
	 * we were asked to.
	 */
	ret = 0;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (!list_empty_careful(&ctx->cq_overflow_list))
			io_cqring_overflow_flush(ctx, false);
		if (flags & IORING_ENTER_SQ_WAKEUP)
			wake_up(&ctx->sqo_wait);
		submitted = to_submit;
	} else if (to_submit) {
		struct mm_struct *cur_mm;

		mutex_lock(&ctx->uring_lock);
		/* already have mm, so io_submit_sqes() won't try to grab it */
		cur_mm = ctx->sqo_mm;
		submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
					   &cur_mm, false);
		mutex_unlock(&ctx->uring_lock);

		if (submitted != to_submit)
			goto out;
	}
	if (flags & IORING_ENTER_GETEVENTS) {
		unsigned nr_events = 0;

		min_complete = min(min_complete, ctx->cq_entries);

		if (ctx->flags & IORING_SETUP_IOPOLL) {
			ret = io_iopoll_check(ctx, &nr_events, min_complete);
		} else {
			ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
		}
	}

out:
	percpu_ref_put(&ctx->refs);
out_fput:
	fdput(f);
	return submitted ? submitted : ret;
}

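/*
 * Usage recap (a hedged sketch): a non-SQPOLL submit-and-wait round trip
 * is a single call that both submits and blocks for completions:
 *
 *	ret = syscall(__NR_io_uring_enter, ring_fd, to_submit,
 *		      min_complete, IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * With IORING_SETUP_SQPOLL the kernel thread submits on its own, so the
 * call is only needed to wake it (IORING_ENTER_SQ_WAKEUP) or to wait.
 */
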
static int io_uring_show_cred(int id, void *p, void *data)
{
	const struct cred *cred = p;
	struct seq_file *m = data;
	struct user_namespace *uns = seq_user_ns(m);
	struct group_info *gi;
	kernel_cap_t cap;
	unsigned __capi;
	int g;

	seq_printf(m, "%5d\n", id);
	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
	seq_puts(m, "\n\tGroups:\t");
	gi = cred->group_info;
	for (g = 0; g < gi->ngroups; g++) {
		seq_put_decimal_ull(m, g ? " " : "",
					from_kgid_munged(uns, gi->gid[g]));
	}
	seq_puts(m, "\n\tCapEff:\t");
	cap = cred->cap_effective;
	CAP_FOR_EACH_U32(__capi)
		seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
	seq_putc(m, '\n');
	return 0;
}

static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
	int i;

	mutex_lock(&ctx->uring_lock);
	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
	for (i = 0; i < ctx->nr_user_files; i++) {
		struct fixed_file_table *table;
		struct file *f;

		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
		f = table->files[i & IORING_FILE_TABLE_MASK];
		if (f)
			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *buf = &ctx->user_bufs[i];

		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
						(unsigned int) buf->len);
	}
	if (!idr_is_empty(&ctx->personality_idr)) {
		seq_printf(m, "Personalities:\n");
		idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
	}
	mutex_unlock(&ctx->uring_lock);
}

static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct io_ring_ctx *ctx = f->private_data;

	if (percpu_ref_tryget(&ctx->refs)) {
		__io_uring_show_fdinfo(ctx, m);
		percpu_ref_put(&ctx->refs);
	}
}

static const struct file_operations io_uring_fops = {
	.release	= io_uring_release,
	.flush		= io_uring_flush,
	.mmap		= io_uring_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = io_uring_nommu_get_unmapped_area,
	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
	.poll		= io_uring_poll,
	.fasync		= io_uring_fasync,
	.show_fdinfo	= io_uring_show_fdinfo,
};

static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
				  struct io_uring_params *p)
{
	struct io_rings *rings;
	size_t size, sq_array_offset;

	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	rings = io_mem_alloc(size);
	if (!rings)
		return -ENOMEM;

	ctx->rings = rings;
	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
	rings->sq_ring_mask = p->sq_entries - 1;
	rings->cq_ring_mask = p->cq_entries - 1;
	rings->sq_ring_entries = p->sq_entries;
	rings->cq_ring_entries = p->cq_entries;
	ctx->sq_mask = rings->sq_ring_mask;
	ctx->cq_mask = rings->cq_ring_mask;
	ctx->sq_entries = rings->sq_ring_entries;
	ctx->cq_entries = rings->cq_ring_entries;

	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
	if (size == SIZE_MAX) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -EOVERFLOW;
	}

	ctx->sq_sqes = io_mem_alloc(size);
	if (!ctx->sq_sqes) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -ENOMEM;
	}

	return 0;
}

/*
 * Allocate an anonymous fd, this is what constitutes the application
 * visible backing of an io_uring instance. The application mmaps this
 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
 * we have to tie this fd to a socket for file garbage collection purposes.
 */
static int io_uring_get_fd(struct io_ring_ctx *ctx)
{
	struct file *file;
	int ret;

#if defined(CONFIG_UNIX)
	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
				&ctx->ring_sock);
	if (ret)
		return ret;
#endif

	ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (ret < 0)
		goto err;

	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
					O_RDWR | O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
		goto err;
	}

#if defined(CONFIG_UNIX)
	ctx->ring_sock->file = file;
#endif
	fd_install(ret, file);
	return ret;
err:
#if defined(CONFIG_UNIX)
	sock_release(ctx->ring_sock);
	ctx->ring_sock = NULL;
#endif
	return ret;
}

static int io_uring_create(unsigned entries, struct io_uring_params *p)
{
	struct user_struct *user = NULL;
	struct io_ring_ctx *ctx;
	bool account_mem;
	int ret;

	if (!entries)
		return -EINVAL;
	if (entries > IORING_MAX_ENTRIES) {
		if (!(p->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		entries = IORING_MAX_ENTRIES;
	}

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. We do NOT impose
		 * any cq vs sq ring sizing.
		 */
		if (p->cq_entries < p->sq_entries)
			return -EINVAL;
		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
			if (!(p->flags & IORING_SETUP_CLAMP))
				return -EINVAL;
			p->cq_entries = IORING_MAX_CQ_ENTRIES;
		}
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}

	user = get_uid(current_user());
	account_mem = !capable(CAP_IPC_LOCK);

	if (account_mem) {
		ret = io_account_mem(user,
				ring_pages(p->sq_entries, p->cq_entries));
		if (ret) {
			free_uid(user);
			return ret;
		}
	}

	ctx = io_ring_ctx_alloc(p);
	if (!ctx) {
		if (account_mem)
			io_unaccount_mem(user, ring_pages(p->sq_entries,
								p->cq_entries));
		free_uid(user);
		return -ENOMEM;
	}
	ctx->compat = in_compat_syscall();
	ctx->account_mem = account_mem;
	ctx->user = user;
	ctx->creds = get_current_cred();

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_start(ctx, p);
	if (ret)
		goto err;

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	ret = io_uring_get_fd(ctx);
	if (ret < 0)
		goto err;

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY;
	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}

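/*
 * Sizing example for the code above (values assumed): entries = 100 gives
 * p->sq_entries = 128 and, without IORING_SETUP_CQSIZE, p->cq_entries =
 * 256. With IORING_SETUP_CQSIZE, cq_entries = 100 fails with -EINVAL
 * (smaller than sq_entries) while cq_entries = 200 is rounded up to 256.
 */
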
/*
 * Sets up an io_uring context, and returns the fd. The application asks
 * for a ring size; we return the actual sq/cq ring sizes (among other
 * things) in the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	long ret;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
		return -EINVAL;

	ret = io_uring_create(entries, &p);
	if (ret < 0)
		return ret;

	if (copy_to_user(params, &p, sizeof(p)))
		return -EFAULT;

	return ret;
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}

static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_op_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}

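/*
 * Userspace view (a hedged sketch): probing which opcodes this kernel
 * supports, sized for every opcode up to IORING_OP_LAST:
 *
 *	struct io_uring_probe *probe;
 *	probe = calloc(1, sizeof(*probe) +
 *			  IORING_OP_LAST * sizeof(probe->ops[0]));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, IORING_OP_LAST);
 *	supported = probe->ops[IORING_OP_READV].flags & IO_URING_OP_SUPPORTED;
 */
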
static int io_register_personality(struct io_ring_ctx *ctx)
{
	const struct cred *creds = get_current_cred();
	int id;

	id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
				USHRT_MAX, GFP_KERNEL);
	if (id < 0)
		put_cred(creds);
	return id;
}

static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
	const struct cred *old_creds;

	old_creds = idr_remove(&ctx->personality_idr, id);
	if (old_creds) {
		put_cred(old_creds);
		return 0;
	}

	return -EINVAL;
}

static bool io_register_op_must_quiesce(int op)
{
	switch (op) {
	case IORING_UNREGISTER_FILES:
	case IORING_REGISTER_FILES_UPDATE:
	case IORING_REGISTER_PROBE:
	case IORING_REGISTER_PERSONALITY:
	case IORING_UNREGISTER_PERSONALITY:
		return false;
	default:
		return true;
	}
}

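/*
 * Ops that return true above force __io_uring_register() to kill the
 * percpu ref and wait for all inflight requests to drain before touching
 * global state; the listed file/probe/personality ops are safe against
 * concurrent use and skip that quiesce.
 */
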
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex, if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (io_register_op_must_quiesce(opcode)) {
		percpu_ref_kill(&ctx->refs);

		/*
		 * Drop uring mutex before waiting for references to exit. If
		 * another thread is currently inside io_uring_enter() it might
		 * need to grab the uring_lock to make progress. If we hold it
		 * here across the drain wait, then we can deadlock. It's safe
		 * to drop the mutex here, since no new references will come in
		 * after we've killed the percpu ref.
		 */
		mutex_unlock(&ctx->uring_lock);
		ret = wait_for_completion_interruptible(&ctx->completions[0]);
		mutex_lock(&ctx->uring_lock);
		if (ret) {
			percpu_ref_resurrect(&ctx->refs);
			ret = -EINTR;
			goto out;
		}
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffer_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffer_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_sqe_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
out:
		reinit_completion(&ctx->completions[0]);
	}
	return ret;
}

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files,
				ctx->nr_user_bufs, ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}

static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0, __u8, opcode);
	BUILD_BUG_SQE_ELEM(1, __u8, flags);
	BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
	BUILD_BUG_SQE_ELEM(4, __s32, fd);
	BUILD_BUG_SQE_ELEM(8, __u64, off);
	BUILD_BUG_SQE_ELEM(8, __u64, addr2);
	BUILD_BUG_SQE_ELEM(16, __u64, addr);
	BUILD_BUG_SQE_ELEM(24, __u32, len);
	BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
	BUILD_BUG_SQE_ELEM(28, __u16, poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
	BUILD_BUG_SQE_ELEM(32, __u64, user_data);
	BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
	BUILD_BUG_SQE_ELEM(42, __u16, personality);

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
	return 0;
}
__initcall(io_uring_init);