io_uring: split poll and poll_remove structs
[linux-block.git] / fs / io_uring.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
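
/*
 * Illustrative sketch, not part of this file: how an application might
 * publish one SQE under the barrier rules described above. The helper name,
 * its parameters, and the userspace smp_store_release() (standing in for
 * e.g. a C11 release store) are assumptions for illustration only; real
 * applications should use liburing rather than open-coding this.
 */
#if 0
static void example_publish_sqe(struct io_uring_sqe *sqes, u32 *sq_array,
				u32 *ktail, u32 ring_mask,
				const struct io_uring_sqe *src)
{
	u32 tail = *ktail;		/* only the application writes the tail */
	u32 idx = tail & ring_mask;

	sqes[idx] = *src;		/* fill the SQE slot */
	sq_array[idx] = idx;		/* publish its index */
	/* order the SQE/array stores before the tail store */
	smp_store_release(ktail, tail + 1);
}
#endif
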
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/blk-cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};

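/*
 * Illustrative sketch, not part of this file: draining the CQ ring from
 * userspace using the layout above. handle_cqe() is hypothetical, and
 * smp_load_acquire()/smp_store_release() stand in for the application's
 * acquire/release primitives; real code should use liburing.
 */
#if 0
static unsigned example_reap_cqes(struct io_rings *rings)
{
	u32 head = rings->cq.head;	/* only the application writes head */
	u32 tail = smp_load_acquire(&rings->cq.tail);
	unsigned seen = 0;

	while (head != tail) {
		struct io_uring_cqe *cqe = &rings->cqes[head & rings->cq_ring_mask];

		handle_cqe(cqe);	/* hypothetical consumer */
		head++;
		seen++;
	}
	/* release the consumed entries back to the kernel */
	smp_store_release(&rings->cq.head, head);
	return seen;
}
#endif
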
struct io_mapped_ubuf {
	u64			ubuf;
	size_t			len;
	struct bio_vec		*bvec;
	unsigned int		nr_bvecs;
	unsigned long		acct_pages;
};

struct fixed_file_table {
	struct file		**files;
};

struct fixed_file_ref_node {
	struct percpu_ref	refs;
	struct list_head	node;
	struct list_head	file_list;
	struct fixed_file_data	*file_data;
	struct llist_node	llist;
	bool			done;
};

struct fixed_file_data {
	struct fixed_file_table		*table;
	struct io_ring_ctx		*ctx;

	struct fixed_file_ref_node	*node;
	struct percpu_ref		refs;
	struct completion		done;
	struct list_head		ref_list;
	spinlock_t			lock;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__s32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

struct io_sq_data {
	refcount_t		refs;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;
	struct list_head	ctx_new_list;
	struct mutex		ctx_lock;

	struct task_struct	*thread;
	struct wait_queue_head	wait;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		limit_mem: 1;
		unsigned int		cq_overflow_flushed: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		unsigned		cached_cq_overflow;
		unsigned long		sq_check_overflow;

		struct list_head	defer_list;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;

		wait_queue_head_t	inflight_wait;
		struct io_uring_sqe	*sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct io_rings	*rings;

	/* IO offload */
	struct io_wq		*io_wq;

	/*
	 * For SQPOLL usage - we hold a reference to the parent task, so we
	 * have access to the ->files
	 */
	struct task_struct	*sqo_task;

	/* Only used for accounting purposes */
	struct mm_struct	*mm_account;

#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state	*sqo_blkcg_css;
#endif

	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct wait_queue_entry	sqo_wait_entry;
	struct list_head	sqd_list;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_file_data	*file_data;
	unsigned		nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	const struct cred	*creds;

#ifdef CONFIG_AUDIT
	kuid_t			loginuid;
	unsigned int		sessionid;
#endif

	struct completion	ref_comp;
	struct completion	sq_thread_comp;

	/* if all else fails... */
	struct io_kiocb		*fallback_req;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	struct idr		io_buffer_idr;

	struct idr		personality_idr;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		unsigned		cq_mask;
		atomic_t		cq_timeouts;
		unsigned long		cq_check_overflow;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_file;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;

	struct delayed_work		file_put_work;
	struct llist_head		file_put_llist;

	struct work_struct		exit_work;
	struct io_restriction		restrictions;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_poll_remove {
	struct file			*file;
	u64				addr;
};

struct io_close {
	struct file			*file;
	struct file			*put_file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct user_msghdr __user *umsg;
		void __user		*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	bool				ignore_nonblock;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_files_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__s32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_completion {
	struct file			*file;
	struct list_head		list;
	int				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	struct iovec			*iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	REQ_F_LINK_HEAD_BIT,
	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_NO_FILE_TABLE_BIT,
	REQ_F_WORK_INITIALIZED_BIT,
	REQ_F_LTIMEOUT_ACTIVE_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* head of a link */
	REQ_F_LINK_HEAD		= BIT(REQ_F_LINK_HEAD_BIT),
	/* fail rest of links */
	REQ_F_FAIL_LINK		= BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* doesn't need file table for this request */
	REQ_F_NO_FILE_TABLE	= BIT(REQ_F_NO_FILE_TABLE_BIT),
	/* io_wq_work is initialized */
	REQ_F_WORK_INITIALIZED	= BIT(REQ_F_WORK_INITIALIZED_BIT),
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

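/*
 * Illustrative sketch, not part of this file: the first enum above defines
 * bit numbers (the low ones shared with the IOSQE_* uapi flags) and the
 * second turns them into masks via BIT(), so per-request state composes as
 * plain bitwise tests. The helper below is hypothetical; it mirrors the
 * test req_set_fail_links() performs later in this file.
 */
#if 0
static bool example_link_should_fail(const struct io_kiocb *req)
{
	/* a soft link fails its remaining links; REQ_F_HARDLINK suppresses that */
	return (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK;
}
#endif
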
/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_remove	poll_remove;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_files_update	files_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	refcount_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct list_head		link_list;

	/*
	 * 1. used with ctx->iopoll_list with reads/writes
	 * 2. to track reqs with ->files (see io_op_def::file_table)
	 */
	struct list_head		inflight_entry;

	struct percpu_ref		*fixed_file_refs;
	struct callback_head		task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

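/*
 * Illustrative sketch, not part of this file: only the union member that
 * matches req->opcode is valid, which is why this commit gives poll remove
 * requests their own struct io_poll_remove instead of aliasing fields of
 * struct io_poll_iocb. The helper below is hypothetical.
 */
#if 0
static u64 example_poll_remove_target(const struct io_kiocb *req)
{
	if (req->opcode != IORING_OP_POLL_REMOVE)
		return 0;	/* req->poll_remove is not valid here */
	return req->poll_remove.addr;
}
#endif
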
#define IO_IOPOLL_BATCH			8

struct io_comp_state {
	unsigned int		nr;
	struct list_head	list;
	struct io_ring_ctx	*ctx;
};

struct io_submit_state {
	struct blk_plug		plug;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_IOPOLL_BATCH];
	unsigned int		free_reqs;

	/*
	 * Batch completion logic
	 */
	struct io_comp_state	comp;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		has_refs;
	unsigned int		ios_left;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* don't fail if file grab fails */
	unsigned		needs_file_no_error : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* must always have async data allocated */
	unsigned		needs_async_data : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
	unsigned		work_flags;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FSIZE,
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
		.work_flags		= IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE |
						IO_WQ_WORK_MM,
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
		.work_flags		= IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FS,
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FS,
	},
	[IORING_OP_TIMEOUT] = {
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_timeout_data),
		.work_flags		= IO_WQ_WORK_MM,
	},
	[IORING_OP_TIMEOUT_REMOVE] = {},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_timeout_data),
		.work_flags		= IO_WQ_WORK_MM,
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_connect),
		.work_flags		= IO_WQ_WORK_MM,
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE,
	},
	[IORING_OP_OPENAT] = {
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FS | IO_WQ_WORK_MM,
	},
	[IORING_OP_CLOSE] = {
		.needs_file		= 1,
		.needs_file_no_error	= 1,
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_FILES_UPDATE] = {
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_MM,
	},
	[IORING_OP_STATX] = {
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_MM |
						IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FSIZE,
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
		.work_flags		= IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_MADVISE] = {
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_OPENAT2] = {
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_FS |
						IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
		.work_flags		= IO_WQ_WORK_FILES,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.work_flags		= IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_FILES |
						IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_UNLINKAT] = {
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_FILES |
						IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
	},
};

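/*
 * Illustrative sketch, not part of this file: the table above is indexed by
 * opcode to decide per-request behaviour, e.g. whether IOSQE_BUFFER_SELECT
 * is permitted for a given operation. The helper below is hypothetical.
 */
#if 0
static bool example_buffer_select_ok(const struct io_uring_sqe *sqe)
{
	const struct io_op_def *def = &io_op_defs[sqe->opcode];

	return !(sqe->flags & IOSQE_BUFFER_SELECT) || def->buffer_select;
}
#endif
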
enum io_mem_account {
	ACCT_LOCKED,
	ACCT_PINNED,
};

static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
			     struct io_comp_state *cs);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void __io_queue_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_files_update *ip,
				 unsigned nr_args);
static void __io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_submit_state *state,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
static void io_file_put_work(struct work_struct *work);

static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
			       struct iovec **iovec, struct iov_iter *iter,
			       bool needs_lock);
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     const struct iovec *fast_iov,
			     struct iov_iter *iter, bool force);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static inline void io_clean_op(struct io_kiocb *req)
{
	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
			  REQ_F_INFLIGHT))
		__io_clean_op(req);
}

static void io_sq_thread_drop_mm_files(void)
{
	struct files_struct *files = current->files;
	struct mm_struct *mm = current->mm;

	if (mm) {
		kthread_unuse_mm(mm);
		mmput(mm);
		current->mm = NULL;
	}
	if (files) {
		struct nsproxy *nsproxy = current->nsproxy;

		task_lock(current);
		current->files = NULL;
		current->nsproxy = NULL;
		task_unlock(current);
		put_files_struct(files);
		put_nsproxy(nsproxy);
	}
}

1052
1053static void __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
1054{
1055 if (!current->files) {
1056 struct files_struct *files;
1057 struct nsproxy *nsproxy;
1058
1059 task_lock(ctx->sqo_task);
1060 files = ctx->sqo_task->files;
1061 if (!files) {
1062 task_unlock(ctx->sqo_task);
1063 return;
1064 }
1065 atomic_inc(&files->count);
1066 get_nsproxy(ctx->sqo_task->nsproxy);
1067 nsproxy = ctx->sqo_task->nsproxy;
1068 task_unlock(ctx->sqo_task);
1069
1070 task_lock(current);
1071 current->files = files;
1072 current->nsproxy = nsproxy;
1073 task_unlock(current);
1074 }
c40f6379
JA
1075}
1076
static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
{
	struct mm_struct *mm;

	if (current->mm)
		return 0;

	/* Should never happen */
	if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
		return -EFAULT;

	task_lock(ctx->sqo_task);
	mm = ctx->sqo_task->mm;
	if (unlikely(!mm || !mmget_not_zero(mm)))
		mm = NULL;
	task_unlock(ctx->sqo_task);

	if (mm) {
		kthread_use_mm(mm);
		return 0;
	}

	return -EFAULT;
}

static int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
					 struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];

	if (def->work_flags & IO_WQ_WORK_MM) {
		int ret = __io_sq_thread_acquire_mm(ctx);
		if (unlikely(ret))
			return ret;
	}

	if (def->needs_file || (def->work_flags & IO_WQ_WORK_FILES))
		__io_sq_thread_acquire_files(ctx);

	return 0;
}

static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
					 struct cgroup_subsys_state **cur_css)
{
#ifdef CONFIG_BLK_CGROUP
	/* puts the old one when swapping */
	if (*cur_css != ctx->sqo_blkcg_css) {
		kthread_associate_blkcg(ctx->sqo_blkcg_css);
		*cur_css = ctx->sqo_blkcg_css;
	}
#endif
}

static void io_sq_thread_unassociate_blkcg(void)
{
#ifdef CONFIG_BLK_CGROUP
	kthread_associate_blkcg(NULL);
#endif
}

static inline void req_set_fail_links(struct io_kiocb *req)
{
	if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
		req->flags |= REQ_F_FAIL_LINK;
}

/*
 * None of these are dereferenced, they are simply used to check if any of
 * them have changed. If we're under current and check they are still the
 * same, we're fine to grab references to them for actual out-of-line use.
 */
static void io_init_identity(struct io_identity *id)
{
	id->files = current->files;
	id->mm = current->mm;
#ifdef CONFIG_BLK_CGROUP
	rcu_read_lock();
	id->blkcg_css = blkcg_css();
	rcu_read_unlock();
#endif
	id->creds = current_cred();
	id->nsproxy = current->nsproxy;
	id->fs = current->fs;
	id->fsize = rlimit(RLIMIT_FSIZE);
#ifdef CONFIG_AUDIT
	id->loginuid = current->loginuid;
	id->sessionid = current->sessionid;
#endif
	refcount_set(&id->count, 1);
}

static inline void __io_req_init_async(struct io_kiocb *req)
{
	memset(&req->work, 0, sizeof(req->work));
	req->flags |= REQ_F_WORK_INITIALIZED;
}

/*
 * Note: must call io_req_init_async() for the first time you
 * touch any members of io_wq_work.
 */
static inline void io_req_init_async(struct io_kiocb *req)
{
	struct io_uring_task *tctx = current->io_uring;

	if (req->flags & REQ_F_WORK_INITIALIZED)
		return;

	__io_req_init_async(req);

	/* Grab a ref if this isn't our static identity */
	req->work.identity = tctx->identity;
	if (tctx->identity != &tctx->__identity)
		refcount_inc(&req->work.identity->count);
}

static inline bool io_async_submit(struct io_ring_ctx *ctx)
{
	return ctx->flags & IORING_SETUP_SQPOLL;
}

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	return !req->timeout.off;
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
	if (!ctx->fallback_req)
		goto err;

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
					GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_sq_wait);
	INIT_LIST_HEAD(&ctx->sqd_list);
	init_waitqueue_head(&ctx->cq_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->ref_comp);
	init_completion(&ctx->sq_thread_comp);
	idr_init(&ctx->io_buffer_idr);
	idr_init(&ctx->personality_idr);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	init_waitqueue_head(&ctx->inflight_wait);
	spin_lock_init(&ctx->inflight_lock);
	INIT_LIST_HEAD(&ctx->inflight_list);
	INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
	init_llist_head(&ctx->file_put_llist);
	return ctx;
err:
	if (ctx->fallback_req)
		kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}

static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq != ctx->cached_cq_tail
				+ READ_ONCE(ctx->cached_cq_overflow);
	}

	return false;
}

static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* order cqe stores with ring update */
	smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);

	if (wq_has_sleeper(&ctx->cq_wait)) {
		wake_up_interruptible(&ctx->cq_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
{
	if (req->work.identity == &tctx->__identity)
		return;
	if (refcount_dec_and_test(&req->work.identity->count))
		kfree(req->work.identity);
}

static void io_req_clean_work(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_WORK_INITIALIZED))
		return;

	req->flags &= ~REQ_F_WORK_INITIALIZED;

	if (req->work.flags & IO_WQ_WORK_MM) {
		mmdrop(req->work.identity->mm);
		req->work.flags &= ~IO_WQ_WORK_MM;
	}
#ifdef CONFIG_BLK_CGROUP
	if (req->work.flags & IO_WQ_WORK_BLKCG) {
		css_put(req->work.identity->blkcg_css);
		req->work.flags &= ~IO_WQ_WORK_BLKCG;
	}
#endif
	if (req->work.flags & IO_WQ_WORK_CREDS) {
		put_cred(req->work.identity->creds);
		req->work.flags &= ~IO_WQ_WORK_CREDS;
	}
	if (req->work.flags & IO_WQ_WORK_FS) {
		struct fs_struct *fs = req->work.identity->fs;

		spin_lock(&req->work.identity->fs->lock);
		if (--fs->users)
			fs = NULL;
		spin_unlock(&req->work.identity->fs->lock);
		if (fs)
			free_fs_struct(fs);
		req->work.flags &= ~IO_WQ_WORK_FS;
	}

	io_put_identity(req->task->io_uring, req);
}

/*
 * Create a private copy of io_identity, since some fields don't match
 * the current context.
 */
static bool io_identity_cow(struct io_kiocb *req)
{
	struct io_uring_task *tctx = current->io_uring;
	const struct cred *creds = NULL;
	struct io_identity *id;

	if (req->work.flags & IO_WQ_WORK_CREDS)
		creds = req->work.identity->creds;

	id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL);
	if (unlikely(!id)) {
		req->work.flags |= IO_WQ_WORK_CANCEL;
		return false;
	}

	/*
	 * We can safely just re-init the creds we copied. Either the field
	 * matches the current one, or we haven't grabbed it yet. The only
	 * exception is ->creds, through registered personalities, so handle
	 * that one separately.
	 */
	io_init_identity(id);
	if (creds)
		req->work.identity->creds = creds;

	/* add one for this request */
	refcount_inc(&id->count);

	/* drop tctx and req identity references, if needed */
	if (tctx->identity != &tctx->__identity &&
	    refcount_dec_and_test(&tctx->identity->count))
		kfree(tctx->identity);
	if (req->work.identity != &tctx->__identity &&
	    refcount_dec_and_test(&req->work.identity->count))
		kfree(req->work.identity);

	req->work.identity = id;
	tctx->identity = id;
	return true;
}

static bool io_grab_identity(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_identity *id = req->work.identity;
	struct io_ring_ctx *ctx = req->ctx;

	if (def->work_flags & IO_WQ_WORK_FSIZE) {
		if (id->fsize != rlimit(RLIMIT_FSIZE))
			return false;
		req->work.flags |= IO_WQ_WORK_FSIZE;
	}

	if (!(req->work.flags & IO_WQ_WORK_FILES) &&
	    (def->work_flags & IO_WQ_WORK_FILES) &&
	    !(req->flags & REQ_F_NO_FILE_TABLE)) {
		if (id->files != current->files ||
		    id->nsproxy != current->nsproxy)
			return false;
		atomic_inc(&id->files->count);
		get_nsproxy(id->nsproxy);
		req->flags |= REQ_F_INFLIGHT;

		spin_lock_irq(&ctx->inflight_lock);
		list_add(&req->inflight_entry, &ctx->inflight_list);
		spin_unlock_irq(&ctx->inflight_lock);
		req->work.flags |= IO_WQ_WORK_FILES;
	}
#ifdef CONFIG_BLK_CGROUP
	if (!(req->work.flags & IO_WQ_WORK_BLKCG) &&
	    (def->work_flags & IO_WQ_WORK_BLKCG)) {
		rcu_read_lock();
		if (id->blkcg_css != blkcg_css()) {
			rcu_read_unlock();
			return false;
		}
		/*
		 * This should be rare, either the cgroup is dying or the task
		 * is moving cgroups. Just punt to root for the handful of ios.
		 */
		if (css_tryget_online(id->blkcg_css))
			req->work.flags |= IO_WQ_WORK_BLKCG;
		rcu_read_unlock();
	}
#endif
	if (!(req->work.flags & IO_WQ_WORK_CREDS)) {
		if (id->creds != current_cred())
			return false;
		get_cred(id->creds);
		req->work.flags |= IO_WQ_WORK_CREDS;
	}
#ifdef CONFIG_AUDIT
	if (!uid_eq(current->loginuid, id->loginuid) ||
	    current->sessionid != id->sessionid)
		return false;
#endif
	if (!(req->work.flags & IO_WQ_WORK_FS) &&
	    (def->work_flags & IO_WQ_WORK_FS)) {
		if (current->fs != id->fs)
			return false;
		spin_lock(&id->fs->lock);
		if (!id->fs->in_exec) {
			id->fs->users++;
			req->work.flags |= IO_WQ_WORK_FS;
		} else {
			req->work.flags |= IO_WQ_WORK_CANCEL;
		}
		spin_unlock(&current->fs->lock);
	}

	return true;
}

static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;
	struct io_identity *id;

	io_req_init_async(req);
	id = req->work.identity;

	if (req->flags & REQ_F_FORCE_ASYNC)
		req->work.flags |= IO_WQ_WORK_CONCURRENT;

	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	/* ->mm can never change on us */
	if (!(req->work.flags & IO_WQ_WORK_MM) &&
	    (def->work_flags & IO_WQ_WORK_MM)) {
		mmgrab(id->mm);
		req->work.flags |= IO_WQ_WORK_MM;
	}

	/* if we fail grabbing identity, we must COW, regrab, and retry */
	if (io_grab_identity(req))
		return;

	if (!io_identity_cow(req))
		return;

	/* can't fail at this point */
	if (!io_grab_identity(req))
		WARN_ON(1);
}

static void io_prep_async_link(struct io_kiocb *req)
{
	struct io_kiocb *cur;

	io_prep_async_work(req);
	if (req->flags & REQ_F_LINK_HEAD)
		list_for_each_entry(cur, &req->link_list, link_list)
			io_prep_async_work(cur);
}

static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link = io_prep_linked_timeout(req);

	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
					&req->work, req->flags);
	io_wq_enqueue(ctx->io_wq, &req->work);
	return link;
}

static void io_queue_async_work(struct io_kiocb *req)
{
	struct io_kiocb *link;

	/* init ->work of the whole link before punting */
	io_prep_async_link(req);
	link = __io_queue_async_work(req);

	if (link)
		io_queue_linked_timeout(link);
}

static void io_kill_timeout(struct io_kiocb *req)
{
	struct io_timeout_data *io = req->async_data;
	int ret;

	ret = hrtimer_try_to_cancel(&io->timer);
	if (ret != -1) {
		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
		list_del_init(&req->timeout.list);
		io_cqring_fill_event(req, 0);
		io_put_req_deferred(req, 1);
	}
}

static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!tsk || req->task == tsk)
		return true;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (ctx->sq_data && req->task == ctx->sq_data->thread)
			return true;
	}
	return false;
}

/*
 * Returns true if we found and killed one or more timeouts
 */
static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk)
{
	struct io_kiocb *req, *tmp;
	int canceled = 0;

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
		if (io_task_match(req, tsk)) {
			io_kill_timeout(req);
			canceled++;
		}
	}
	spin_unlock_irq(&ctx->completion_lock);
	return canceled != 0;
}

static void __io_queue_deferred(struct io_ring_ctx *ctx)
{
	do {
		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);
		struct io_kiocb *link;

		if (req_need_defer(de->req, de->seq))
			break;
		list_del_init(&de->list);
		/* punt-init is done before queueing for defer */
		link = __io_queue_async_work(de->req);
		if (link) {
			__io_queue_linked_timeout(link);
			/* drop submission reference */
			io_put_req_deferred(link, 1);
		}
		kfree(de);
	} while (!list_empty(&ctx->defer_list));
}

static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	while (!list_empty(&ctx->timeout_list)) {
		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
						struct io_kiocb, timeout.list);

		if (io_is_timeout_noseq(req))
			break;
		if (req->timeout.target_seq != ctx->cached_cq_tail
					- atomic_read(&ctx->cq_timeouts))
			break;

		list_del_init(&req->timeout.list);
		io_kill_timeout(req);
	}
}

static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	io_flush_timeouts(ctx);
	__io_commit_cqring(ctx);

	if (unlikely(!list_empty(&ctx->defer_list)))
		__io_queue_deferred(ctx);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
}

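/*
 * Illustrative sketch, not part of this file: io_sqring_full() above is the
 * kernel-side view of the free-running SQ counters. On the application side
 * of the same protocol, a SQPOLL submitter must re-check
 * IORING_SQ_NEED_WAKEUP with a full barrier after updating the tail, per the
 * comment at the top of this file. "ring_fd" and the helper are
 * hypothetical; real code should use liburing.
 */
#if 0
static void example_sqpoll_wakeup(int ring_fd, struct io_rings *rings)
{
	smp_mb();	/* order the tail update before reading sq_flags */
	if (READ_ONCE(rings->sq_flags) & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}
#endif
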
static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail;

	tail = ctx->cached_cq_tail;
	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
		return NULL;

	ctx->cached_cq_tail++;
	return &rings->cqes[tail & ctx->cq_mask];
}

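/*
 * Illustrative sketch, not part of this file: the "tail - head" test in
 * io_get_cqring() works because head and tail are free-running u32 counters,
 * so unsigned subtraction yields the number of pending entries even across
 * wraparound. The helper below is hypothetical.
 */
#if 0
static unsigned example_cq_pending(u32 head, u32 tail)
{
	return tail - head;	/* well-defined under unsigned wraparound */
}
#endif
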
static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
	if (!ctx->cq_ev_fd)
		return false;
	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		return false;
	if (!ctx->eventfd_async)
		return true;
	return io_wq_current_is_worker();
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
}

static void io_cqring_mark_overflow(struct io_ring_ctx *ctx)
{
	if (list_empty(&ctx->cq_overflow_list)) {
		clear_bit(0, &ctx->sq_check_overflow);
		clear_bit(0, &ctx->cq_check_overflow);
		ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
	}
}

static inline bool __io_match_files(struct io_kiocb *req,
				    struct files_struct *files)
{
	return ((req->flags & REQ_F_WORK_INITIALIZED) &&
		(req->work.flags & IO_WQ_WORK_FILES)) &&
		req->work.identity->files == files;
}

static bool io_match_files(struct io_kiocb *req,
			   struct files_struct *files)
{
	struct io_kiocb *link;

	if (!files)
		return true;
	if (__io_match_files(req, files))
		return true;
	if (req->flags & REQ_F_LINK_HEAD) {
		list_for_each_entry(link, &req->link_list, link_list) {
			if (__io_match_files(link, files))
				return true;
		}
	}
	return false;
}

/* Returns true if there are no backlogged entries after the flush */
static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
				     struct task_struct *tsk,
				     struct files_struct *files)
{
	struct io_rings *rings = ctx->rings;
	struct io_kiocb *req, *tmp;
	struct io_uring_cqe *cqe;
	unsigned long flags;
	LIST_HEAD(list);

	if (!force) {
		if (list_empty_careful(&ctx->cq_overflow_list))
			return true;
		if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
		    rings->cq_ring_entries))
			return false;
	}

	spin_lock_irqsave(&ctx->completion_lock, flags);

	/* if force is set, the ring is going away. always drop after that */
	if (force)
		ctx->cq_overflow_flushed = 1;

	cqe = NULL;
	list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
		if (tsk && req->task != tsk)
			continue;
		if (!io_match_files(req, files))
			continue;

		cqe = io_get_cqring(ctx);
		if (!cqe && !force)
			break;

		list_move(&req->compl.list, &list);
		if (cqe) {
			WRITE_ONCE(cqe->user_data, req->user_data);
			WRITE_ONCE(cqe->res, req->result);
			WRITE_ONCE(cqe->flags, req->compl.cflags);
		} else {
			ctx->cached_cq_overflow++;
			WRITE_ONCE(ctx->rings->cq_overflow,
				   ctx->cached_cq_overflow);
		}
	}

	io_commit_cqring(ctx);
	io_cqring_mark_overflow(ctx);

	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);

	while (!list_empty(&list)) {
		req = list_first_entry(&list, struct io_kiocb, compl.list);
		list_del(&req->compl.list);
		io_put_req(req);
	}

	return cqe != NULL;
}

bcda7baa 1769static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
2b188cc1 1770{
78e19bbe 1771 struct io_ring_ctx *ctx = req->ctx;
2b188cc1
JA
1772 struct io_uring_cqe *cqe;
1773
78e19bbe 1774 trace_io_uring_complete(ctx, req->user_data, res);
51c3ff62 1775
2b188cc1
JA
1776 /*
1777 * If we can't get a cq entry, userspace overflowed the
1778 * submission (by quite a lot). Increment the overflow count in
1779 * the ring.
1780 */
1781 cqe = io_get_cqring(ctx);
1d7bb1d5 1782 if (likely(cqe)) {
78e19bbe 1783 WRITE_ONCE(cqe->user_data, req->user_data);
2b188cc1 1784 WRITE_ONCE(cqe->res, res);
bcda7baa 1785 WRITE_ONCE(cqe->flags, cflags);
fdaf083c
JA
1786 } else if (ctx->cq_overflow_flushed ||
1787 atomic_read(&req->task->io_uring->in_idle)) {
0f212204
JA
1788 /*
1789 * If we're in ring overflow flush mode, or in task cancel mode,
1790 * then we cannot store the request for later flushing; we need
1791 * to drop it on the floor.
1792 */
2c3bac6d
PB
1793 ctx->cached_cq_overflow++;
1794 WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
1d7bb1d5 1795 } else {
ad3eb2c8
JA
1796 if (list_empty(&ctx->cq_overflow_list)) {
1797 set_bit(0, &ctx->sq_check_overflow);
1798 set_bit(0, &ctx->cq_check_overflow);
6d5f9049 1799 ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
ad3eb2c8 1800 }
40d8ddd4 1801 io_clean_op(req);
1d7bb1d5 1802 req->result = res;
0f7e466b 1803 req->compl.cflags = cflags;
40d8ddd4
PB
1804 refcount_inc(&req->refs);
1805 list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
2b188cc1
JA
1806 }
1807}
1808
bcda7baa
JA
1809static void io_cqring_fill_event(struct io_kiocb *req, long res)
1810{
1811 __io_cqring_fill_event(req, res, 0);
1812}
1813
e1e16097 1814static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
2b188cc1 1815{
78e19bbe 1816 struct io_ring_ctx *ctx = req->ctx;
2b188cc1
JA
1817 unsigned long flags;
1818
1819 spin_lock_irqsave(&ctx->completion_lock, flags);
bcda7baa 1820 __io_cqring_fill_event(req, res, cflags);
2b188cc1
JA
1821 io_commit_cqring(ctx);
1822 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1823
8c838788 1824 io_cqring_ev_posted(ctx);
2b188cc1
JA
1825}
1826
229a7b63 1827static void io_submit_flush_completions(struct io_comp_state *cs)
bcda7baa 1828{
229a7b63
JA
1829 struct io_ring_ctx *ctx = cs->ctx;
1830
1831 spin_lock_irq(&ctx->completion_lock);
1832 while (!list_empty(&cs->list)) {
1833 struct io_kiocb *req;
1834
3ca405eb
PB
1835 req = list_first_entry(&cs->list, struct io_kiocb, compl.list);
1836 list_del(&req->compl.list);
0f7e466b 1837 __io_cqring_fill_event(req, req->result, req->compl.cflags);
216578e5
PB
1838
1839 /*
1840 * io_free_req() doesn't care about completion_lock unless one
1841 * of these flags is set. REQ_F_WORK_INITIALIZED is in the list
1842 * because of a potential deadlock with req->work.fs->lock
1843 */
1844 if (req->flags & (REQ_F_FAIL_LINK|REQ_F_LINK_TIMEOUT
1845 |REQ_F_WORK_INITIALIZED)) {
229a7b63
JA
1846 spin_unlock_irq(&ctx->completion_lock);
1847 io_put_req(req);
1848 spin_lock_irq(&ctx->completion_lock);
216578e5
PB
1849 } else {
1850 io_put_req(req);
229a7b63
JA
1851 }
1852 }
1853 io_commit_cqring(ctx);
1854 spin_unlock_irq(&ctx->completion_lock);
1855
1856 io_cqring_ev_posted(ctx);
1857 cs->nr = 0;
1858}
1859
1860static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags,
1861 struct io_comp_state *cs)
1862{
1863 if (!cs) {
1864 io_cqring_add_event(req, res, cflags);
1865 io_put_req(req);
1866 } else {
3ca405eb 1867 io_clean_op(req);
229a7b63 1868 req->result = res;
0f7e466b 1869 req->compl.cflags = cflags;
3ca405eb 1870 list_add_tail(&req->compl.list, &cs->list);
229a7b63
JA
1871 if (++cs->nr >= 32)
1872 io_submit_flush_completions(cs);
1873 }
e1e16097
JA
1874}
1875
1876static void io_req_complete(struct io_kiocb *req, long res)
bcda7baa 1877{
229a7b63 1878 __io_req_complete(req, res, 0, NULL);
bcda7baa
JA
1879}
1880
0ddf92e8
JA
1881static inline bool io_is_fallback_req(struct io_kiocb *req)
1882{
1883 return req == (struct io_kiocb *)
1884 ((unsigned long) req->ctx->fallback_req & ~1UL);
1885}
1886
1887static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
1888{
1889 struct io_kiocb *req;
1890
1891 req = ctx->fallback_req;
dd461af6 1892 if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
0ddf92e8
JA
1893 return req;
1894
1895 return NULL;
1896}
1897
0553b8bd
PB
1898static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
1899 struct io_submit_state *state)
2b188cc1 1900{
f6b6c7d6 1901 if (!state->free_reqs) {
291b2821 1902 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
2579f913
JA
1903 size_t sz;
1904 int ret;
1905
1906 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
fd6fab2c
JA
1907 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
1908
1909 /*
1910 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1911 * retry single alloc to be on the safe side.
1912 */
1913 if (unlikely(ret <= 0)) {
1914 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1915 if (!state->reqs[0])
0ddf92e8 1916 goto fallback;
fd6fab2c
JA
1917 ret = 1;
1918 }
291b2821 1919 state->free_reqs = ret;
2b188cc1
JA
1920 }
1921
291b2821
PB
1922 state->free_reqs--;
1923 return state->reqs[state->free_reqs];
0ddf92e8 1924fallback:
0553b8bd 1925 return io_get_fallback_req(ctx);
2b188cc1
JA
1926}
1927
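/*
 * Minimal sketch of the same bulk-then-single fallback for an arbitrary
 * kmem_cache (names hypothetical). kmem_cache_alloc_bulk() is
 * all-or-nothing, so a single-object retry keeps a degraded slow path.
 */
static int grab_objects(struct kmem_cache *cachep, void **objs, size_t want)
{
	int got = kmem_cache_alloc_bulk(cachep, GFP_KERNEL | __GFP_NOWARN,
					want, objs);

	if (unlikely(got <= 0)) {
		objs[0] = kmem_cache_alloc(cachep, GFP_KERNEL);
		if (!objs[0])
			return 0;
		got = 1;
	}
	return got;
}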
8da11c19
PB
1928static inline void io_put_file(struct io_kiocb *req, struct file *file,
1929 bool fixed)
1930{
1931 if (fixed)
05589553 1932 percpu_ref_put(req->fixed_file_refs);
8da11c19
PB
1933 else
1934 fput(file);
1935}
1936
4edf20f9 1937static void io_dismantle_req(struct io_kiocb *req)
2b188cc1 1938{
3ca405eb 1939 io_clean_op(req);
929a3af9 1940
e8c2bc1f
JA
1941 if (req->async_data)
1942 kfree(req->async_data);
8da11c19
PB
1943 if (req->file)
1944 io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
fcb323cc 1945
4edf20f9 1946 io_req_clean_work(req);
e65ef56d
JA
1947}
1948
216578e5 1949static void __io_free_req(struct io_kiocb *req)
c6ca97b3 1950{
0f212204 1951 struct io_uring_task *tctx = req->task->io_uring;
51a4cc11 1952 struct io_ring_ctx *ctx = req->ctx;
c6ca97b3 1953
216578e5 1954 io_dismantle_req(req);
c6ca97b3 1955
d8a6df10 1956 percpu_counter_dec(&tctx->inflight);
fdaf083c 1957 if (atomic_read(&tctx->in_idle))
0f212204 1958 wake_up(&tctx->wait);
e3bc8e9d
JA
1959 put_task_struct(req->task);
1960
b1e50e54
PB
1961 if (likely(!io_is_fallback_req(req)))
1962 kmem_cache_free(req_cachep, req);
1963 else
ecfc5177
PB
1964 clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
1965 percpu_ref_put(&ctx->refs);
e65ef56d
JA
1966}
1967
c9abd7ad 1968static void io_kill_linked_timeout(struct io_kiocb *req)
2665abfd 1969{
a197f664 1970 struct io_ring_ctx *ctx = req->ctx;
7c86ffee 1971 struct io_kiocb *link;
c9abd7ad
PB
1972 bool cancelled = false;
1973 unsigned long flags;
7c86ffee 1974
c9abd7ad
PB
1975 spin_lock_irqsave(&ctx->completion_lock, flags);
1976 link = list_first_entry_or_null(&req->link_list, struct io_kiocb,
1977 link_list);
900fad45
PB
1978 /*
1979 * Can happen if a linked timeout fired and the chain had been like
1980 * req -> link t-out -> link t-out [-> ...]
1981 */
c9abd7ad
PB
1982 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
1983 struct io_timeout_data *io = link->async_data;
1984 int ret;
7c86ffee 1985
c9abd7ad
PB
1986 list_del_init(&link->link_list);
1987 ret = hrtimer_try_to_cancel(&io->timer);
1988 if (ret != -1) {
1989 io_cqring_fill_event(link, -ECANCELED);
1990 io_commit_cqring(ctx);
1991 cancelled = true;
1992 }
1993 }
7c86ffee 1994 req->flags &= ~REQ_F_LINK_TIMEOUT;
216578e5 1995 spin_unlock_irqrestore(&ctx->completion_lock, flags);
ab0b6451 1996
c9abd7ad 1997 if (cancelled) {
7c86ffee 1998 io_cqring_ev_posted(ctx);
c9abd7ad
PB
1999 io_put_req(link);
2000 }
7c86ffee
PB
2001}
2002
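/*
 * Illustrative userspace sketch (not from this file): arming the linked
 * timeout that the helper above cancels. The op SQE carries
 * IOSQE_IO_LINK and is followed by an IORING_OP_LINK_TIMEOUT SQE; if
 * the op finishes first, the timeout CQE is -ECANCELED as posted above.
 * The two SQEs are assumed to be consecutive slots from the SQ ring.
 */
static void chain_link_timeout(struct io_uring_sqe *op,
			       struct io_uring_sqe *tsqe,
			       struct __kernel_timespec *ts)
{
	op->flags |= IOSQE_IO_LINK;		/* chain op -> timeout */

	memset(tsqe, 0, sizeof(*tsqe));
	tsqe->opcode = IORING_OP_LINK_TIMEOUT;
	tsqe->addr = (unsigned long) ts;	/* single timespec */
	tsqe->len = 1;
}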
9b5f7bd9 2003static struct io_kiocb *io_req_link_next(struct io_kiocb *req)
7c86ffee
PB
2004{
2005 struct io_kiocb *nxt;
4d7dd462 2006
9e645e11
JA
2007 /*
2008 * The list should never be empty when we are called here. But it could
2009 * potentially happen if the chain is messed up; check to be on the
2010 * safe side.
2011 */
7c86ffee 2012 if (unlikely(list_empty(&req->link_list)))
9b5f7bd9 2013 return NULL;
2665abfd 2014
7c86ffee
PB
2015 nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list);
2016 list_del_init(&req->link_list);
2017 if (!list_empty(&nxt->link_list))
2018 nxt->flags |= REQ_F_LINK_HEAD;
9b5f7bd9 2019 return nxt;
9e645e11
JA
2020}
2021
2022/*
dea3b49c 2023 * Called if REQ_F_LINK_HEAD is set, and we fail the head request
9e645e11 2024 */
d148ca4b 2025static void io_fail_links(struct io_kiocb *req)
9e645e11 2026{
2665abfd 2027 struct io_ring_ctx *ctx = req->ctx;
d148ca4b 2028 unsigned long flags;
9e645e11 2029
d148ca4b 2030 spin_lock_irqsave(&ctx->completion_lock, flags);
9e645e11 2031 while (!list_empty(&req->link_list)) {
4493233e
PB
2032 struct io_kiocb *link = list_first_entry(&req->link_list,
2033 struct io_kiocb, link_list);
9e645e11 2034
4493233e 2035 list_del_init(&link->link_list);
c826bd7a 2036 trace_io_uring_fail_link(req, link);
2665abfd 2037
7c86ffee 2038 io_cqring_fill_event(link, -ECANCELED);
216578e5
PB
2039
2040 /*
2041 * It's ok to free under spinlock as they're not linked anymore,
2042 * but avoid REQ_F_WORK_INITIALIZED because it may deadlock on
2043 * work.fs->lock.
2044 */
2045 if (link->flags & REQ_F_WORK_INITIALIZED)
2046 io_put_req_deferred(link, 2);
2047 else
2048 io_double_put_req(link);
9e645e11 2049 }
2665abfd
JA
2050
2051 io_commit_cqring(ctx);
216578e5 2052 spin_unlock_irqrestore(&ctx->completion_lock, flags);
9e645e11 2053
2665abfd 2054 io_cqring_ev_posted(ctx);
9e645e11
JA
2055}
2056
3fa5e0f3 2057static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
c69f8dbe 2058{
9b0d911a 2059 req->flags &= ~REQ_F_LINK_HEAD;
7c86ffee
PB
2060 if (req->flags & REQ_F_LINK_TIMEOUT)
2061 io_kill_linked_timeout(req);
944e58bf 2062
9e645e11
JA
2063 /*
2064 * If LINK is set, we have dependent requests in this chain. If we
2065 * didn't fail this request, queue the first one up, moving any other
2066 * dependencies to the next request. In case of failure, fail the rest
2067 * of the chain.
2068 */
9b5f7bd9
PB
2069 if (likely(!(req->flags & REQ_F_FAIL_LINK)))
2070 return io_req_link_next(req);
2071 io_fail_links(req);
2072 return NULL;
4d7dd462 2073}
9e645e11 2074
3fa5e0f3
PB
2075static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
2076{
2077 if (likely(!(req->flags & REQ_F_LINK_HEAD)))
2078 return NULL;
2079 return __io_req_find_next(req);
2080}
2081
87c4311f 2082static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
c2c4c83c
JA
2083{
2084 struct task_struct *tsk = req->task;
2085 struct io_ring_ctx *ctx = req->ctx;
91989c70
JA
2086 enum task_work_notify_mode notify;
2087 int ret;
c2c4c83c 2088
6200b0ae
JA
2089 if (tsk->flags & PF_EXITING)
2090 return -ESRCH;
2091
c2c4c83c 2092 /*
0ba9c9ed
JA
2093 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2094 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2095 * processing task_work. There's no reliable way to tell if TWA_RESUME
2096 * will do the job.
c2c4c83c 2097 */
91989c70 2098 notify = TWA_NONE;
fd7d6de2 2099 if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
c2c4c83c
JA
2100 notify = TWA_SIGNAL;
2101
87c4311f 2102 ret = task_work_add(tsk, &req->task_work, notify);
c2c4c83c
JA
2103 if (!ret)
2104 wake_up_process(tsk);
0ba9c9ed 2105
c2c4c83c
JA
2106 return ret;
2107}
2108
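/*
 * Minimal kernel-side sketch of the pattern above, with hypothetical
 * names: queue a callback on a target task and kick it. TWA_SIGNAL
 * interrupts even a task blocked in the kernel, which is why it is the
 * default here outside of SQPOLL.
 */
static int queue_on_task(struct task_struct *tsk, struct callback_head *cb,
			 void (*func)(struct callback_head *))
{
	int ret;

	init_task_work(cb, func);
	ret = task_work_add(tsk, cb, TWA_SIGNAL);
	if (!ret)
		wake_up_process(tsk);
	return ret;	/* -ESRCH if the task is exiting */
}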
c40f6379
JA
2109static void __io_req_task_cancel(struct io_kiocb *req, int error)
2110{
2111 struct io_ring_ctx *ctx = req->ctx;
2112
2113 spin_lock_irq(&ctx->completion_lock);
2114 io_cqring_fill_event(req, error);
2115 io_commit_cqring(ctx);
2116 spin_unlock_irq(&ctx->completion_lock);
2117
2118 io_cqring_ev_posted(ctx);
2119 req_set_fail_links(req);
2120 io_double_put_req(req);
2121}
2122
2123static void io_req_task_cancel(struct callback_head *cb)
2124{
2125 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
87ceb6a6 2126 struct io_ring_ctx *ctx = req->ctx;
c40f6379
JA
2127
2128 __io_req_task_cancel(req, -ECANCELED);
87ceb6a6 2129 percpu_ref_put(&ctx->refs);
c40f6379
JA
2130}
2131
2132static void __io_req_task_submit(struct io_kiocb *req)
2133{
2134 struct io_ring_ctx *ctx = req->ctx;
2135
c40f6379 2136 if (!__io_sq_thread_acquire_mm(ctx)) {
28cea78a 2137 __io_sq_thread_acquire_files(ctx);
c40f6379 2138 mutex_lock(&ctx->uring_lock);
c1379e24 2139 __io_queue_sqe(req, NULL);
c40f6379
JA
2140 mutex_unlock(&ctx->uring_lock);
2141 } else {
2142 __io_req_task_cancel(req, -EFAULT);
2143 }
2144}
2145
2146static void io_req_task_submit(struct callback_head *cb)
2147{
2148 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
6d816e08 2149 struct io_ring_ctx *ctx = req->ctx;
c40f6379
JA
2150
2151 __io_req_task_submit(req);
6d816e08 2152 percpu_ref_put(&ctx->refs);
c40f6379
JA
2153}
2154
2155static void io_req_task_queue(struct io_kiocb *req)
2156{
c40f6379
JA
2157 int ret;
2158
2159 init_task_work(&req->task_work, io_req_task_submit);
6d816e08 2160 percpu_ref_get(&req->ctx->refs);
c40f6379 2161
87c4311f 2162 ret = io_req_task_work_add(req, true);
c40f6379 2163 if (unlikely(ret)) {
c2c4c83c
JA
2164 struct task_struct *tsk;
2165
c40f6379
JA
2166 init_task_work(&req->task_work, io_req_task_cancel);
2167 tsk = io_wq_get_task(req->ctx->io_wq);
91989c70 2168 task_work_add(tsk, &req->task_work, TWA_NONE);
c2c4c83c 2169 wake_up_process(tsk);
c40f6379 2170 }
c40f6379
JA
2171}
2172
c3524383 2173static void io_queue_next(struct io_kiocb *req)
c69f8dbe 2174{
9b5f7bd9 2175 struct io_kiocb *nxt = io_req_find_next(req);
944e58bf
PB
2176
2177 if (nxt)
906a8c3f 2178 io_req_task_queue(nxt);
c69f8dbe
JL
2179}
2180
c3524383 2181static void io_free_req(struct io_kiocb *req)
7a743e22 2182{
c3524383
PB
2183 io_queue_next(req);
2184 __io_free_req(req);
2185}
8766dd51 2186
2d6500d4
PB
2187struct req_batch {
2188 void *reqs[IO_IOPOLL_BATCH];
2189 int to_free;
7a743e22 2190
5af1d13e
PB
2191 struct task_struct *task;
2192 int task_refs;
2d6500d4
PB
2193};
2194
5af1d13e
PB
2195static inline void io_init_req_batch(struct req_batch *rb)
2196{
2197 rb->to_free = 0;
2198 rb->task_refs = 0;
2199 rb->task = NULL;
2200}
2201
2d6500d4
PB
2202static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
2203 struct req_batch *rb)
2204{
2205 kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
2206 percpu_ref_put_many(&ctx->refs, rb->to_free);
2207 rb->to_free = 0;
2208}
2209
2210static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2211 struct req_batch *rb)
2212{
2213 if (rb->to_free)
2214 __io_req_free_batch_flush(ctx, rb);
5af1d13e 2215 if (rb->task) {
d8a6df10
JA
2216 struct io_uring_task *tctx = rb->task->io_uring;
2217
2218 percpu_counter_sub(&tctx->inflight, rb->task_refs);
5af1d13e
PB
2219 put_task_struct_many(rb->task, rb->task_refs);
2220 rb->task = NULL;
2221 }
2d6500d4
PB
2222}
2223
2224static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
2225{
2226 if (unlikely(io_is_fallback_req(req))) {
2227 io_free_req(req);
2228 return;
2229 }
2230 if (req->flags & REQ_F_LINK_HEAD)
2231 io_queue_next(req);
2232
e3bc8e9d 2233 if (req->task != rb->task) {
0f212204 2234 if (rb->task) {
d8a6df10
JA
2235 struct io_uring_task *tctx = rb->task->io_uring;
2236
2237 percpu_counter_sub(&tctx->inflight, rb->task_refs);
e3bc8e9d 2238 put_task_struct_many(rb->task, rb->task_refs);
5af1d13e 2239 }
e3bc8e9d
JA
2240 rb->task = req->task;
2241 rb->task_refs = 0;
5af1d13e 2242 }
e3bc8e9d 2243 rb->task_refs++;
5af1d13e 2244
4edf20f9 2245 io_dismantle_req(req);
2d6500d4
PB
2246 rb->reqs[rb->to_free++] = req;
2247 if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
2248 __io_req_free_batch_flush(req->ctx, rb);
7a743e22
PB
2249}
2250
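/*
 * Minimal sketch of the flush half of the batching above (hypothetical
 * names): stash objects in an array and hand them to the allocator in
 * one kmem_cache_free_bulk() call instead of freeing one at a time.
 */
struct obj_batch {
	void *objs[IO_IOPOLL_BATCH];
	int nr;
};

static void batch_free(struct kmem_cache *cachep, struct obj_batch *b,
		       void *obj)
{
	b->objs[b->nr++] = obj;
	if (b->nr == ARRAY_SIZE(b->objs)) {
		kmem_cache_free_bulk(cachep, b->nr, b->objs);
		b->nr = 0;
	}
}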
ba816ad6
JA
2251/*
2252 * Drop reference to request, return next in chain (if there is one) if this
2253 * was the last reference to this request.
2254 */
9b5f7bd9 2255static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
e65ef56d 2256{
9b5f7bd9
PB
2257 struct io_kiocb *nxt = NULL;
2258
2a44f467 2259 if (refcount_dec_and_test(&req->refs)) {
9b5f7bd9 2260 nxt = io_req_find_next(req);
4d7dd462 2261 __io_free_req(req);
2a44f467 2262 }
9b5f7bd9 2263 return nxt;
2b188cc1
JA
2264}
2265
e65ef56d
JA
2266static void io_put_req(struct io_kiocb *req)
2267{
2268 if (refcount_dec_and_test(&req->refs))
2269 io_free_req(req);
2b188cc1
JA
2270}
2271
216578e5
PB
2272static void io_put_req_deferred_cb(struct callback_head *cb)
2273{
2274 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2275
2276 io_free_req(req);
2277}
2278
2279static void io_free_req_deferred(struct io_kiocb *req)
2280{
2281 int ret;
2282
2283 init_task_work(&req->task_work, io_put_req_deferred_cb);
2284 ret = io_req_task_work_add(req, true);
2285 if (unlikely(ret)) {
2286 struct task_struct *tsk;
2287
2288 tsk = io_wq_get_task(req->ctx->io_wq);
91989c70 2289 task_work_add(tsk, &req->task_work, TWA_NONE);
216578e5
PB
2290 wake_up_process(tsk);
2291 }
2292}
2293
2294static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2295{
2296 if (refcount_sub_and_test(refs, &req->refs))
2297 io_free_req_deferred(req);
2298}
2299
f4db7182 2300static struct io_wq_work *io_steal_work(struct io_kiocb *req)
7a743e22 2301{
6df1db6b 2302 struct io_kiocb *nxt;
f4db7182 2303
7a743e22 2304 /*
f4db7182
PB
2305 * We're called from io-wq context, which owns a ref. So, if that's the
2306 * last one, it's safe to steal the next work. False negatives are OK;
2307 * the work will just be re-punted async in io_put_work().
7a743e22 2308 */
f4db7182
PB
2309 if (refcount_read(&req->refs) != 1)
2310 return NULL;
7a743e22 2311
9b5f7bd9 2312 nxt = io_req_find_next(req);
6df1db6b 2313 return nxt ? &nxt->work : NULL;
7a743e22
PB
2314}
2315
978db57e
JA
2316static void io_double_put_req(struct io_kiocb *req)
2317{
2318 /* drop both submit and complete references */
2319 if (refcount_sub_and_test(2, &req->refs))
2320 io_free_req(req);
2321}
2322
1d7bb1d5 2323static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
a3a0e43f 2324{
84f97dc2
JA
2325 struct io_rings *rings = ctx->rings;
2326
ad3eb2c8
JA
2327 if (test_bit(0, &ctx->cq_check_overflow)) {
2328 /*
2329 * noflush == true is from the waitqueue handler, just ensure
2330 * we wake up the task, and the next invocation will flush the
2331 * entries. We cannot safely do it from here.
2332 */
2333 if (noflush && !list_empty(&ctx->cq_overflow_list))
2334 return -1U;
1d7bb1d5 2335
e6c8aa9a 2336 io_cqring_overflow_flush(ctx, false, NULL, NULL);
ad3eb2c8 2337 }
1d7bb1d5 2338
a3a0e43f
JA
2339 /* See comment at the top of this file */
2340 smp_rmb();
ad3eb2c8 2341 return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
a3a0e43f
JA
2342}
2343
fb5ccc98
PB
2344static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2345{
2346 struct io_rings *rings = ctx->rings;
2347
2348 /* make sure SQ entry isn't read before tail */
2349 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2350}
2351
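/*
 * Illustrative userspace counterpart (not from this file): the same
 * ring-distance arithmetic from the application side, pairing the
 * kernel's smp_store_release of the CQ tail with an acquire load.
 * khead/ktail are hypothetical pointers into the CQ ring mapping.
 */
static unsigned cq_ready(const unsigned *khead, const unsigned *ktail)
{
	unsigned tail = __atomic_load_n(ktail, __ATOMIC_ACQUIRE);

	return tail - *khead;	/* head is only advanced by the app */
}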
8ff069bf 2352static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
e94f141b 2353{
8ff069bf 2354 unsigned int cflags;
e94f141b 2355
bcda7baa
JA
2356 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2357 cflags |= IORING_CQE_F_BUFFER;
0e1b6fe3 2358 req->flags &= ~REQ_F_BUFFER_SELECTED;
bcda7baa
JA
2359 kfree(kbuf);
2360 return cflags;
e94f141b
JA
2361}
2362
8ff069bf 2363static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
bcda7baa 2364{
4d954c25 2365 struct io_buffer *kbuf;
bcda7baa 2366
4d954c25 2367 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
8ff069bf
PB
2368 return io_put_kbuf(req, kbuf);
2369}
2370
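/*
 * Illustrative userspace sketch (not from this file): recovering the
 * buffer ID that io_put_kbuf() packed into cqe->flags for
 * provided-buffer reads.
 */
static int cqe_buffer_id(const struct io_uring_cqe *cqe)
{
	if (!(cqe->flags & IORING_CQE_F_BUFFER))
		return -1;	/* no buffer was selected */
	return cqe->flags >> IORING_CQE_BUFFER_SHIFT;
}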
4c6e277c
JA
2371static inline bool io_run_task_work(void)
2372{
6200b0ae
JA
2373 /*
2374 * Not safe to run on exiting task, and the task_work handling will
2375 * not add work to such a task.
2376 */
2377 if (unlikely(current->flags & PF_EXITING))
2378 return false;
4c6e277c
JA
2379 if (current->task_works) {
2380 __set_current_state(TASK_RUNNING);
2381 task_work_run();
2382 return true;
2383 }
2384
2385 return false;
bcda7baa
JA
2386}
2387
bbde017a
XW
2388static void io_iopoll_queue(struct list_head *again)
2389{
2390 struct io_kiocb *req;
2391
2392 do {
d21ffe7e
PB
2393 req = list_first_entry(again, struct io_kiocb, inflight_entry);
2394 list_del(&req->inflight_entry);
81b68a5c 2395 __io_complete_rw(req, -EAGAIN, 0, NULL);
bbde017a
XW
2396 } while (!list_empty(again));
2397}
2398
def596e9
JA
2399/*
2400 * Find and free completed poll iocbs
2401 */
2402static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2403 struct list_head *done)
2404{
8237e045 2405 struct req_batch rb;
def596e9 2406 struct io_kiocb *req;
bbde017a
XW
2407 LIST_HEAD(again);
2408
2409 /* order with ->result store in io_complete_rw_iopoll() */
2410 smp_rmb();
def596e9 2411
5af1d13e 2412 io_init_req_batch(&rb);
def596e9 2413 while (!list_empty(done)) {
bcda7baa
JA
2414 int cflags = 0;
2415
d21ffe7e 2416 req = list_first_entry(done, struct io_kiocb, inflight_entry);
bbde017a 2417 if (READ_ONCE(req->result) == -EAGAIN) {
56450c20 2418 req->result = 0;
bbde017a 2419 req->iopoll_completed = 0;
d21ffe7e 2420 list_move_tail(&req->inflight_entry, &again);
bbde017a
XW
2421 continue;
2422 }
d21ffe7e 2423 list_del(&req->inflight_entry);
def596e9 2424
bcda7baa 2425 if (req->flags & REQ_F_BUFFER_SELECTED)
8ff069bf 2426 cflags = io_put_rw_kbuf(req);
bcda7baa
JA
2427
2428 __io_cqring_fill_event(req, req->result, cflags);
def596e9
JA
2429 (*nr_events)++;
2430
c3524383 2431 if (refcount_dec_and_test(&req->refs))
2d6500d4 2432 io_req_free_batch(&rb, req);
def596e9 2433 }
def596e9 2434
09bb8394 2435 io_commit_cqring(ctx);
32b2244a
XW
2436 if (ctx->flags & IORING_SETUP_SQPOLL)
2437 io_cqring_ev_posted(ctx);
2d6500d4 2438 io_req_free_batch_finish(ctx, &rb);
581f9810 2439
bbde017a
XW
2440 if (!list_empty(&again))
2441 io_iopoll_queue(&again);
581f9810
BM
2442}
2443
def596e9
JA
2444static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2445 long min)
2446{
2447 struct io_kiocb *req, *tmp;
2448 LIST_HEAD(done);
2449 bool spin;
2450 int ret;
2451
2452 /*
2453 * Only spin for completions if we don't have multiple devices hanging
2454 * off our complete list, and we're under the requested amount.
2455 */
2456 spin = !ctx->poll_multi_file && *nr_events < min;
2457
2458 ret = 0;
d21ffe7e 2459 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
9adbd45d 2460 struct kiocb *kiocb = &req->rw.kiocb;
def596e9
JA
2461
2462 /*
581f9810
BM
2463 * Move completed and retryable entries to our local lists.
2464 * If we find a request that requires polling, break out
2465 * and complete those lists first, if we have entries there.
def596e9 2466 */
65a6543d 2467 if (READ_ONCE(req->iopoll_completed)) {
d21ffe7e 2468 list_move_tail(&req->inflight_entry, &done);
def596e9
JA
2469 continue;
2470 }
2471 if (!list_empty(&done))
2472 break;
2473
2474 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2475 if (ret < 0)
2476 break;
2477
3aadc23e
PB
2478 /* iopoll may have completed current req */
2479 if (READ_ONCE(req->iopoll_completed))
d21ffe7e 2480 list_move_tail(&req->inflight_entry, &done);
3aadc23e 2481
def596e9
JA
2482 if (ret && spin)
2483 spin = false;
2484 ret = 0;
2485 }
2486
2487 if (!list_empty(&done))
2488 io_iopoll_complete(ctx, nr_events, &done);
2489
2490 return ret;
2491}
2492
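/*
 * Illustrative userspace sketch (not from this file): creating the
 * polled ring that io_do_iopoll() services. IOPOLL requires O_DIRECT
 * files, and completions are only reaped by re-entering the kernel
 * with IORING_ENTER_GETEVENTS (or via the SQPOLL thread).
 */
static int setup_iopoll_ring(unsigned entries, struct io_uring_params *p)
{
	memset(p, 0, sizeof(*p));
	p->flags = IORING_SETUP_IOPOLL;
	return syscall(__NR_io_uring_setup, entries, p);
}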
2493/*
d195a66e 2494 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
def596e9
JA
2495 * non-spinning poll check - we'll still enter the driver poll loop, but only
2496 * as a non-spinning completion check.
2497 */
2498static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
2499 long min)
2500{
540e32a0 2501 while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
def596e9
JA
2502 int ret;
2503
2504 ret = io_do_iopoll(ctx, nr_events, min);
2505 if (ret < 0)
2506 return ret;
eba0a4dd 2507 if (*nr_events >= min)
def596e9
JA
2508 return 0;
2509 }
2510
2511 return 1;
2512}
2513
2514/*
2515 * We can't just wait for polled events to come to us, we have to actively
2516 * find and complete them.
2517 */
b2edc0a7 2518static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
def596e9
JA
2519{
2520 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2521 return;
2522
2523 mutex_lock(&ctx->uring_lock);
540e32a0 2524 while (!list_empty(&ctx->iopoll_list)) {
def596e9
JA
2525 unsigned int nr_events = 0;
2526
b2edc0a7 2527 io_do_iopoll(ctx, &nr_events, 0);
08f5439f 2528
b2edc0a7
PB
2529 /* let it sleep and repeat later if can't complete a request */
2530 if (nr_events == 0)
2531 break;
08f5439f
JA
2532 /*
2533 * Ensure we allow local-to-the-cpu processing to take place;
2534 * in this case we need to ensure that we reap all events.
3fcee5a6 2535 * Also let task_work, etc. progress by releasing the mutex.
08f5439f 2536 */
3fcee5a6
PB
2537 if (need_resched()) {
2538 mutex_unlock(&ctx->uring_lock);
2539 cond_resched();
2540 mutex_lock(&ctx->uring_lock);
2541 }
def596e9
JA
2542 }
2543 mutex_unlock(&ctx->uring_lock);
2544}
2545
7668b92a 2546static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
def596e9 2547{
7668b92a 2548 unsigned int nr_events = 0;
2b2ed975 2549 int iters = 0, ret = 0;
500f9fba 2550
c7849be9
XW
2551 /*
2552 * We disallow the app entering submit/complete with polling, but we
2553 * still need to lock the ring to prevent racing with polled issue
2554 * that got punted to a workqueue.
2555 */
2556 mutex_lock(&ctx->uring_lock);
def596e9 2557 do {
a3a0e43f
JA
2558 /*
2559 * Don't enter poll loop if we already have events pending.
2560 * If we do, we can potentially be spinning for commands that
2561 * already triggered a CQE (e.g. in error).
2562 */
1d7bb1d5 2563 if (io_cqring_events(ctx, false))
a3a0e43f
JA
2564 break;
2565
500f9fba
JA
2566 /*
2567 * If a submit got punted to a workqueue, we can have the
2568 * application entering polling for a command before it gets
2569 * issued. That app will hold the uring_lock for the duration
2570 * of the poll right here, so we need to take a breather every
2571 * now and then to ensure that the issue has a chance to add
2572 * the poll to the issued list. Otherwise we can spin here
2573 * forever, while the workqueue is stuck trying to acquire the
2574 * very same mutex.
2575 */
2576 if (!(++iters & 7)) {
2577 mutex_unlock(&ctx->uring_lock);
4c6e277c 2578 io_run_task_work();
500f9fba
JA
2579 mutex_lock(&ctx->uring_lock);
2580 }
2581
7668b92a 2582 ret = io_iopoll_getevents(ctx, &nr_events, min);
def596e9
JA
2583 if (ret <= 0)
2584 break;
2585 ret = 0;
7668b92a 2586 } while (min && !nr_events && !need_resched());
def596e9 2587
500f9fba 2588 mutex_unlock(&ctx->uring_lock);
def596e9
JA
2589 return ret;
2590}
2591
491381ce 2592static void kiocb_end_write(struct io_kiocb *req)
2b188cc1 2593{
491381ce
JA
2594 /*
2595 * Tell lockdep we inherited freeze protection from submission
2596 * thread.
2597 */
2598 if (req->flags & REQ_F_ISREG) {
2599 struct inode *inode = file_inode(req->file);
2b188cc1 2600
491381ce 2601 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
2b188cc1 2602 }
491381ce 2603 file_end_write(req->file);
2b188cc1
JA
2604}
2605
a1d7c393
JA
2606static void io_complete_rw_common(struct kiocb *kiocb, long res,
2607 struct io_comp_state *cs)
2b188cc1 2608{
9adbd45d 2609 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
bcda7baa 2610 int cflags = 0;
2b188cc1 2611
491381ce
JA
2612 if (kiocb->ki_flags & IOCB_WRITE)
2613 kiocb_end_write(req);
2b188cc1 2614
4e88d6e7
JA
2615 if (res != req->result)
2616 req_set_fail_links(req);
bcda7baa 2617 if (req->flags & REQ_F_BUFFER_SELECTED)
8ff069bf 2618 cflags = io_put_rw_kbuf(req);
a1d7c393 2619 __io_req_complete(req, res, cflags, cs);
ba816ad6
JA
2620}
2621
b63534c4
JA
2622#ifdef CONFIG_BLOCK
2623static bool io_resubmit_prep(struct io_kiocb *req, int error)
2624{
2625 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
2626 ssize_t ret = -ECANCELED;
2627 struct iov_iter iter;
2628 int rw;
2629
2630 if (error) {
2631 ret = error;
2632 goto end_req;
2633 }
2634
2635 switch (req->opcode) {
2636 case IORING_OP_READV:
2637 case IORING_OP_READ_FIXED:
2638 case IORING_OP_READ:
2639 rw = READ;
2640 break;
2641 case IORING_OP_WRITEV:
2642 case IORING_OP_WRITE_FIXED:
2643 case IORING_OP_WRITE:
2644 rw = WRITE;
2645 break;
2646 default:
2647 printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
2648 req->opcode);
2649 goto end_req;
2650 }
2651
e8c2bc1f 2652 if (!req->async_data) {
8f3d7496
JA
2653 ret = io_import_iovec(rw, req, &iovec, &iter, false);
2654 if (ret < 0)
2655 goto end_req;
2656 ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
2657 if (!ret)
2658 return true;
2659 kfree(iovec);
2660 } else {
b63534c4 2661 return true;
8f3d7496 2662 }
b63534c4 2663end_req:
b63534c4 2664 req_set_fail_links(req);
b63534c4
JA
2665 return false;
2666}
b63534c4
JA
2667#endif
2668
2669static bool io_rw_reissue(struct io_kiocb *req, long res)
2670{
2671#ifdef CONFIG_BLOCK
355afaeb 2672 umode_t mode = file_inode(req->file)->i_mode;
b63534c4
JA
2673 int ret;
2674
355afaeb
JA
2675 if (!S_ISBLK(mode) && !S_ISREG(mode))
2676 return false;
b63534c4
JA
2677 if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
2678 return false;
2679
28cea78a 2680 ret = io_sq_thread_acquire_mm_files(req->ctx, req);
6d816e08 2681
fdee946d
JA
2682 if (io_resubmit_prep(req, ret)) {
2683 refcount_inc(&req->refs);
2684 io_queue_async_work(req);
b63534c4 2685 return true;
fdee946d
JA
2686 }
2687
b63534c4
JA
2688#endif
2689 return false;
2690}
2691
a1d7c393
JA
2692static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2693 struct io_comp_state *cs)
2694{
2695 if (!io_rw_reissue(req, res))
2696 io_complete_rw_common(&req->rw.kiocb, res, cs);
ba816ad6
JA
2697}
2698
2699static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2700{
9adbd45d 2701 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
ba816ad6 2702
a1d7c393 2703 __io_complete_rw(req, res, res2, NULL);
2b188cc1
JA
2704}
2705
def596e9
JA
2706static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2707{
9adbd45d 2708 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
def596e9 2709
491381ce
JA
2710 if (kiocb->ki_flags & IOCB_WRITE)
2711 kiocb_end_write(req);
def596e9 2712
2d7d6792 2713 if (res != -EAGAIN && res != req->result)
4e88d6e7 2714 req_set_fail_links(req);
bbde017a
XW
2715
2716 WRITE_ONCE(req->result, res);
2717 /* order with io_poll_complete() checking ->result */
cd664b0e
PB
2718 smp_wmb();
2719 WRITE_ONCE(req->iopoll_completed, 1);
def596e9
JA
2720}
2721
2722/*
2723 * After the iocb has been issued, it's safe to be found on the poll list.
2724 * Adding the kiocb to the list AFTER submission ensures that we don't
2725 * find it from an io_iopoll_getevents() thread before the issuer is done
2726 * accessing the kiocb cookie.
2727 */
2728static void io_iopoll_req_issued(struct io_kiocb *req)
2729{
2730 struct io_ring_ctx *ctx = req->ctx;
2731
2732 /*
2733 * Track whether we have multiple files in our lists. This will impact
2734 * how we do polling eventually, not spinning if we're on potentially
2735 * different devices.
2736 */
540e32a0 2737 if (list_empty(&ctx->iopoll_list)) {
def596e9
JA
2738 ctx->poll_multi_file = false;
2739 } else if (!ctx->poll_multi_file) {
2740 struct io_kiocb *list_req;
2741
540e32a0 2742 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
d21ffe7e 2743 inflight_entry);
9adbd45d 2744 if (list_req->file != req->file)
def596e9
JA
2745 ctx->poll_multi_file = true;
2746 }
2747
2748 /*
2749 * For fast devices, IO may have already completed. If it has, add
2750 * it to the front so we find it first.
2751 */
65a6543d 2752 if (READ_ONCE(req->iopoll_completed))
d21ffe7e 2753 list_add(&req->inflight_entry, &ctx->iopoll_list);
def596e9 2754 else
d21ffe7e 2755 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
bdcd3eab
XW
2756
2757 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
534ca6d6
JA
2758 wq_has_sleeper(&ctx->sq_data->wait))
2759 wake_up(&ctx->sq_data->wait);
def596e9
JA
2760}
2761
9f13c35b 2762static void __io_state_file_put(struct io_submit_state *state)
9a56a232 2763{
06ef3608
PB
2764 if (state->has_refs)
2765 fput_many(state->file, state->has_refs);
9f13c35b
PB
2766 state->file = NULL;
2767}
2768
2769static inline void io_state_file_put(struct io_submit_state *state)
2770{
2771 if (state->file)
2772 __io_state_file_put(state);
9a56a232
JA
2773}
2774
2775/*
2776 * Get as many references to a file as we have IOs left in this submission,
2777 * assuming most submissions are for one file, or at least that each file
2778 * has more than one submission.
2779 */
8da11c19 2780static struct file *__io_file_get(struct io_submit_state *state, int fd)
9a56a232
JA
2781{
2782 if (!state)
2783 return fget(fd);
2784
2785 if (state->file) {
2786 if (state->fd == fd) {
06ef3608 2787 state->has_refs--;
9a56a232
JA
2788 return state->file;
2789 }
9f13c35b 2790 __io_state_file_put(state);
9a56a232
JA
2791 }
2792 state->file = fget_many(fd, state->ios_left);
2793 if (!state->file)
2794 return NULL;
2795
2796 state->fd = fd;
71b547c0 2797 state->has_refs = state->ios_left - 1;
9a56a232
JA
2798 return state->file;
2799}
2800
4503b767
JA
2801static bool io_bdev_nowait(struct block_device *bdev)
2802{
2803#ifdef CONFIG_BLOCK
9ba0d0c8 2804 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
4503b767
JA
2805#else
2806 return true;
2807#endif
2808}
2809
2b188cc1
JA
2810/*
2811 * If we tracked the file through the SCM inflight mechanism, we could support
2812 * any file. For now, just ensure that anything potentially problematic is done
2813 * inline.
2814 */
af197f50 2815static bool io_file_supports_async(struct file *file, int rw)
2b188cc1
JA
2816{
2817 umode_t mode = file_inode(file)->i_mode;
2818
4503b767
JA
2819 if (S_ISBLK(mode)) {
2820 if (io_bdev_nowait(file->f_inode->i_bdev))
2821 return true;
2822 return false;
2823 }
2824 if (S_ISCHR(mode) || S_ISSOCK(mode))
2b188cc1 2825 return true;
4503b767
JA
2826 if (S_ISREG(mode)) {
2827 if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
2828 file->f_op != &io_uring_fops)
2829 return true;
2830 return false;
2831 }
2b188cc1 2832
c5b85625
JA
2833 /* any ->read/write should understand O_NONBLOCK */
2834 if (file->f_flags & O_NONBLOCK)
2835 return true;
2836
af197f50
JA
2837 if (!(file->f_mode & FMODE_NOWAIT))
2838 return false;
2839
2840 if (rw == READ)
2841 return file->f_op->read_iter != NULL;
2842
2843 return file->f_op->write_iter != NULL;
2b188cc1
JA
2844}
2845
a88fc400 2846static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2b188cc1 2847{
def596e9 2848 struct io_ring_ctx *ctx = req->ctx;
9adbd45d 2849 struct kiocb *kiocb = &req->rw.kiocb;
09bb8394
JA
2850 unsigned ioprio;
2851 int ret;
2b188cc1 2852
491381ce
JA
2853 if (S_ISREG(file_inode(req->file)->i_mode))
2854 req->flags |= REQ_F_ISREG;
2855
2b188cc1 2856 kiocb->ki_pos = READ_ONCE(sqe->off);
ba04291e
JA
2857 if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
2858 req->flags |= REQ_F_CUR_POS;
2859 kiocb->ki_pos = req->file->f_pos;
2860 }
2b188cc1 2861 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
3e577dcd
PB
2862 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2863 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2864 if (unlikely(ret))
2865 return ret;
2b188cc1
JA
2866
2867 ioprio = READ_ONCE(sqe->ioprio);
2868 if (ioprio) {
2869 ret = ioprio_check_cap(ioprio);
2870 if (ret)
09bb8394 2871 return ret;
2b188cc1
JA
2872
2873 kiocb->ki_ioprio = ioprio;
2874 } else
2875 kiocb->ki_ioprio = get_current_ioprio();
2876
8449eeda 2877 /* don't allow async punt if RWF_NOWAIT was requested */
c5b85625 2878 if (kiocb->ki_flags & IOCB_NOWAIT)
8449eeda
SB
2879 req->flags |= REQ_F_NOWAIT;
2880
def596e9 2881 if (ctx->flags & IORING_SETUP_IOPOLL) {
def596e9
JA
2882 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2883 !kiocb->ki_filp->f_op->iopoll)
09bb8394 2884 return -EOPNOTSUPP;
2b188cc1 2885
def596e9
JA
2886 kiocb->ki_flags |= IOCB_HIPRI;
2887 kiocb->ki_complete = io_complete_rw_iopoll;
65a6543d 2888 req->iopoll_completed = 0;
def596e9 2889 } else {
09bb8394
JA
2890 if (kiocb->ki_flags & IOCB_HIPRI)
2891 return -EINVAL;
def596e9
JA
2892 kiocb->ki_complete = io_complete_rw;
2893 }
9adbd45d 2894
3529d8c2
JA
2895 req->rw.addr = READ_ONCE(sqe->addr);
2896 req->rw.len = READ_ONCE(sqe->len);
4f4eeba8 2897 req->buf_index = READ_ONCE(sqe->buf_index);
2b188cc1 2898 return 0;
2b188cc1
JA
2899}
2900
2901static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2902{
2903 switch (ret) {
2904 case -EIOCBQUEUED:
2905 break;
2906 case -ERESTARTSYS:
2907 case -ERESTARTNOINTR:
2908 case -ERESTARTNOHAND:
2909 case -ERESTART_RESTARTBLOCK:
2910 /*
2911 * We can't just restart the syscall, since previously
2912 * submitted sqes may already be in progress. Just fail this
2913 * IO with EINTR.
2914 */
2915 ret = -EINTR;
df561f66 2916 fallthrough;
2b188cc1
JA
2917 default:
2918 kiocb->ki_complete(kiocb, ret, 0);
2919 }
2920}
2921
a1d7c393
JA
2922static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
2923 struct io_comp_state *cs)
ba816ad6 2924{
ba04291e 2925 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
e8c2bc1f 2926 struct io_async_rw *io = req->async_data;
ba04291e 2927
227c0c96 2928 /* add previously done IO, if any */
e8c2bc1f 2929 if (io && io->bytes_done > 0) {
227c0c96 2930 if (ret < 0)
e8c2bc1f 2931 ret = io->bytes_done;
227c0c96 2932 else
e8c2bc1f 2933 ret += io->bytes_done;
227c0c96
JA
2934 }
2935
ba04291e
JA
2936 if (req->flags & REQ_F_CUR_POS)
2937 req->file->f_pos = kiocb->ki_pos;
bcaec089 2938 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
a1d7c393 2939 __io_complete_rw(req, ret, 0, cs);
ba816ad6
JA
2940 else
2941 io_rw_done(kiocb, ret);
2942}
2943
9adbd45d 2944static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
7d009165 2945 struct iov_iter *iter)
edafccee 2946{
9adbd45d
JA
2947 struct io_ring_ctx *ctx = req->ctx;
2948 size_t len = req->rw.len;
edafccee 2949 struct io_mapped_ubuf *imu;
4be1c615 2950 u16 index, buf_index = req->buf_index;
edafccee
JA
2951 size_t offset;
2952 u64 buf_addr;
2953
edafccee
JA
2954 if (unlikely(buf_index >= ctx->nr_user_bufs))
2955 return -EFAULT;
edafccee
JA
2956 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2957 imu = &ctx->user_bufs[index];
9adbd45d 2958 buf_addr = req->rw.addr;
edafccee
JA
2959
2960 /* overflow */
2961 if (buf_addr + len < buf_addr)
2962 return -EFAULT;
2963 /* not inside the mapped region */
2964 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
2965 return -EFAULT;
2966
2967 /*
2968 * May not be the start of the buffer; set the size appropriately
2969 * and advance to the beginning.
2970 */
2971 offset = buf_addr - imu->ubuf;
2972 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bd11b3a3
JA
2973
2974 if (offset) {
2975 /*
2976 * Don't use iov_iter_advance() here, as it's really slow for
2977 * using the latter parts of a big fixed buffer - it iterates
2978 * over each segment manually. We can cheat a bit here, because
2979 * we know that:
2980 *
2981 * 1) it's a BVEC iter, we set it up
2982 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2983 * first and last bvec
2984 *
2985 * So just find our index, and adjust the iterator afterwards.
2986 * If the offset is within the first bvec (or the whole first
2987 * bvec), just use iov_iter_advance(). This makes it easier
2988 * since we can just skip the first segment, which may not
2989 * be PAGE_SIZE aligned.
2990 */
2991 const struct bio_vec *bvec = imu->bvec;
2992
2993 if (offset <= bvec->bv_len) {
2994 iov_iter_advance(iter, offset);
2995 } else {
2996 unsigned long seg_skip;
2997
2998 /* skip first vec */
2999 offset -= bvec->bv_len;
3000 seg_skip = 1 + (offset >> PAGE_SHIFT);
3001
3002 iter->bvec = bvec + seg_skip;
3003 iter->nr_segs -= seg_skip;
99c79f66 3004 iter->count -= bvec->bv_len + offset;
bd11b3a3 3005 iter->iov_offset = offset & ~PAGE_MASK;
bd11b3a3
JA
3006 }
3007 }
3008
5e559561 3009 return len;
edafccee
JA
3010}
3011
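/*
 * Illustrative userspace sketch (not from this file): registering the
 * buffer that io_import_fixed() above resolves. The SQE's buf_index
 * then selects an iovec from this table, and addr/len must stay inside
 * the registered range or the -EFAULT checks above trip.
 */
static int register_one_buffer(int ring_fd, void *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_BUFFERS, &iov, 1);
}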
bcda7baa
JA
3012static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
3013{
3014 if (needs_lock)
3015 mutex_unlock(&ctx->uring_lock);
3016}
3017
3018static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
3019{
3020 /*
3021 * "Normal" inline submissions always hold the uring_lock, since we
3022 * grab it from the system call. Same is true for the SQPOLL offload.
3023 * The only exception is when we've detached the request and issue it
3024 * from an async worker thread, grab the lock for that case.
3025 */
3026 if (needs_lock)
3027 mutex_lock(&ctx->uring_lock);
3028}
3029
3030static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
3031 int bgid, struct io_buffer *kbuf,
3032 bool needs_lock)
3033{
3034 struct io_buffer *head;
3035
3036 if (req->flags & REQ_F_BUFFER_SELECTED)
3037 return kbuf;
3038
3039 io_ring_submit_lock(req->ctx, needs_lock);
3040
3041 lockdep_assert_held(&req->ctx->uring_lock);
3042
3043 head = idr_find(&req->ctx->io_buffer_idr, bgid);
3044 if (head) {
3045 if (!list_empty(&head->list)) {
3046 kbuf = list_last_entry(&head->list, struct io_buffer,
3047 list);
3048 list_del(&kbuf->list);
3049 } else {
3050 kbuf = head;
3051 idr_remove(&req->ctx->io_buffer_idr, bgid);
3052 }
3053 if (*len > kbuf->len)
3054 *len = kbuf->len;
3055 } else {
3056 kbuf = ERR_PTR(-ENOBUFS);
3057 }
3058
3059 io_ring_submit_unlock(req->ctx, needs_lock);
3060
3061 return kbuf;
3062}
3063
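/*
 * Illustrative userspace sketch (not from this file): handing the
 * kernel a group of buffers for io_buffer_select() to pick from,
 * following the IORING_OP_PROVIDE_BUFFERS ABI. Acquiring the SQE from
 * the ring is elided.
 */
static void prep_provide_buffers(struct io_uring_sqe *sqe, void *base,
				 int buf_len, int nbufs, int bgid, int bid)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_PROVIDE_BUFFERS;
	sqe->fd = nbufs;			/* number of buffers */
	sqe->addr = (unsigned long) base;	/* first buffer address */
	sqe->len = buf_len;			/* size of each buffer */
	sqe->buf_group = bgid;			/* group selected via SQE */
	sqe->off = bid;				/* first buffer ID */
}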
4d954c25
JA
3064static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
3065 bool needs_lock)
3066{
3067 struct io_buffer *kbuf;
4f4eeba8 3068 u16 bgid;
4d954c25
JA
3069
3070 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
4f4eeba8 3071 bgid = req->buf_index;
4d954c25
JA
3072 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
3073 if (IS_ERR(kbuf))
3074 return kbuf;
3075 req->rw.addr = (u64) (unsigned long) kbuf;
3076 req->flags |= REQ_F_BUFFER_SELECTED;
3077 return u64_to_user_ptr(kbuf->addr);
3078}
3079
3080#ifdef CONFIG_COMPAT
3081static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
3082 bool needs_lock)
3083{
3084 struct compat_iovec __user *uiov;
3085 compat_ssize_t clen;
3086 void __user *buf;
3087 ssize_t len;
3088
3089 uiov = u64_to_user_ptr(req->rw.addr);
3090 if (!access_ok(uiov, sizeof(*uiov)))
3091 return -EFAULT;
3092 if (__get_user(clen, &uiov->iov_len))
3093 return -EFAULT;
3094 if (clen < 0)
3095 return -EINVAL;
3096
3097 len = clen;
3098 buf = io_rw_buffer_select(req, &len, needs_lock);
3099 if (IS_ERR(buf))
3100 return PTR_ERR(buf);
3101 iov[0].iov_base = buf;
3102 iov[0].iov_len = (compat_size_t) len;
3103 return 0;
3104}
3105#endif
3106
3107static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3108 bool needs_lock)
3109{
3110 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3111 void __user *buf;
3112 ssize_t len;
3113
3114 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3115 return -EFAULT;
3116
3117 len = iov[0].iov_len;
3118 if (len < 0)
3119 return -EINVAL;
3120 buf = io_rw_buffer_select(req, &len, needs_lock);
3121 if (IS_ERR(buf))
3122 return PTR_ERR(buf);
3123 iov[0].iov_base = buf;
3124 iov[0].iov_len = len;
3125 return 0;
3126}
3127
3128static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3129 bool needs_lock)
3130{
dddb3e26
JA
3131 if (req->flags & REQ_F_BUFFER_SELECTED) {
3132 struct io_buffer *kbuf;
3133
3134 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3135 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3136 iov[0].iov_len = kbuf->len;
4d954c25 3137 return 0;
dddb3e26 3138 }
4d954c25
JA
3139 if (!req->rw.len)
3140 return 0;
3141 else if (req->rw.len > 1)
3142 return -EINVAL;
3143
3144#ifdef CONFIG_COMPAT
3145 if (req->ctx->compat)
3146 return io_compat_import(req, iov, needs_lock);
3147#endif
3148
3149 return __io_iov_buffer_select(req, iov, needs_lock);
3150}
3151
8452fd0c
JA
3152static ssize_t __io_import_iovec(int rw, struct io_kiocb *req,
3153 struct iovec **iovec, struct iov_iter *iter,
3154 bool needs_lock)
2b188cc1 3155{
9adbd45d
JA
3156 void __user *buf = u64_to_user_ptr(req->rw.addr);
3157 size_t sqe_len = req->rw.len;
4d954c25 3158 ssize_t ret;
edafccee
JA
3159 u8 opcode;
3160
d625c6ee 3161 opcode = req->opcode;
7d009165 3162 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
edafccee 3163 *iovec = NULL;
9adbd45d 3164 return io_import_fixed(req, rw, iter);
edafccee 3165 }
2b188cc1 3166
bcda7baa 3167 /* buffer index only valid with fixed read/write, or buffer select */
4f4eeba8 3168 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
9adbd45d
JA
3169 return -EINVAL;
3170
3a6820f2 3171 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
bcda7baa 3172 if (req->flags & REQ_F_BUFFER_SELECT) {
4d954c25 3173 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
867a23ea 3174 if (IS_ERR(buf))
4d954c25 3175 return PTR_ERR(buf);
3f9d6441 3176 req->rw.len = sqe_len;
bcda7baa
JA
3177 }
3178
3a6820f2
JA
3179 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3180 *iovec = NULL;
3a901598 3181 return ret < 0 ? ret : sqe_len;
3a6820f2
JA
3182 }
3183
4d954c25
JA
3184 if (req->flags & REQ_F_BUFFER_SELECT) {
3185 ret = io_iov_buffer_select(req, *iovec, needs_lock);
3f9d6441
JA
3186 if (!ret) {
3187 ret = (*iovec)->iov_len;
3188 iov_iter_init(iter, rw, *iovec, 1, ret);
3189 }
4d954c25
JA
3190 *iovec = NULL;
3191 return ret;
3192 }
3193
89cd35c5
CH
3194 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3195 req->ctx->compat);
2b188cc1
JA
3196}
3197
8452fd0c
JA
3198static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
3199 struct iovec **iovec, struct iov_iter *iter,
3200 bool needs_lock)
3201{
e8c2bc1f
JA
3202 struct io_async_rw *iorw = req->async_data;
3203
3204 if (!iorw)
8452fd0c
JA
3205 return __io_import_iovec(rw, req, iovec, iter, needs_lock);
3206 *iovec = NULL;
e8c2bc1f 3207 return iov_iter_count(&iorw->iter);
8452fd0c
JA
3208}
3209
0fef9483
JA
3210static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3211{
5b09e37e 3212 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
0fef9483
JA
3213}
3214
31b51510 3215/*
32960613
JA
3216 * For files that don't have ->read_iter() and ->write_iter(), handle them
3217 * by looping over ->read() or ->write() manually.
31b51510 3218 */
4017eb91 3219static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
32960613 3220{
4017eb91
JA
3221 struct kiocb *kiocb = &req->rw.kiocb;
3222 struct file *file = req->file;
32960613
JA
3223 ssize_t ret = 0;
3224
3225 /*
3226 * Don't support polled IO through this interface, and we can't
3227 * support non-blocking either. For the latter, this just causes
3228 * the kiocb to be handled from an async context.
3229 */
3230 if (kiocb->ki_flags & IOCB_HIPRI)
3231 return -EOPNOTSUPP;
3232 if (kiocb->ki_flags & IOCB_NOWAIT)
3233 return -EAGAIN;
3234
3235 while (iov_iter_count(iter)) {
311ae9e1 3236 struct iovec iovec;
32960613
JA
3237 ssize_t nr;
3238
311ae9e1
PB
3239 if (!iov_iter_is_bvec(iter)) {
3240 iovec = iov_iter_iovec(iter);
3241 } else {
4017eb91
JA
3242 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3243 iovec.iov_len = req->rw.len;
311ae9e1
PB
3244 }
3245
32960613
JA
3246 if (rw == READ) {
3247 nr = file->f_op->read(file, iovec.iov_base,
0fef9483 3248 iovec.iov_len, io_kiocb_ppos(kiocb));
32960613
JA
3249 } else {
3250 nr = file->f_op->write(file, iovec.iov_base,
0fef9483 3251 iovec.iov_len, io_kiocb_ppos(kiocb));
32960613
JA
3252 }
3253
3254 if (nr < 0) {
3255 if (!ret)
3256 ret = nr;
3257 break;
3258 }
3259 ret += nr;
3260 if (nr != iovec.iov_len)
3261 break;
4017eb91
JA
3262 req->rw.len -= nr;
3263 req->rw.addr += nr;
32960613
JA
3264 iov_iter_advance(iter, nr);
3265 }
3266
3267 return ret;
3268}
3269
ff6165b2
JA
3270static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3271 const struct iovec *fast_iov, struct iov_iter *iter)
f67676d1 3272{
e8c2bc1f 3273 struct io_async_rw *rw = req->async_data;
b64e3444 3274
ff6165b2 3275 memcpy(&rw->iter, iter, sizeof(*iter));
afb87658 3276 rw->free_iovec = iovec;
227c0c96 3277 rw->bytes_done = 0;
ff6165b2
JA
3278 /* can only be fixed buffers, no need to do anything */
3279 if (iter->type == ITER_BVEC)
3280 return;
b64e3444 3281 if (!iovec) {
ff6165b2
JA
3282 unsigned iov_off = 0;
3283
3284 rw->iter.iov = rw->fast_iov;
3285 if (iter->iov != fast_iov) {
3286 iov_off = iter->iov - fast_iov;
3287 rw->iter.iov += iov_off;
3288 }
3289 if (rw->fast_iov != fast_iov)
3290 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
45097dae 3291 sizeof(struct iovec) * iter->nr_segs);
99bc4c38
PB
3292 } else {
3293 req->flags |= REQ_F_NEED_CLEANUP;
f67676d1
JA
3294 }
3295}
3296
e8c2bc1f 3297static inline int __io_alloc_async_data(struct io_kiocb *req)
3d9932a8 3298{
e8c2bc1f
JA
3299 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3300 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3301 return req->async_data == NULL;
3d9932a8
XW
3302}
3303
e8c2bc1f 3304static int io_alloc_async_data(struct io_kiocb *req)
f67676d1 3305{
e8c2bc1f 3306 if (!io_op_defs[req->opcode].needs_async_data)
d3656344 3307 return 0;
3d9932a8 3308
e8c2bc1f 3309 return __io_alloc_async_data(req);
b7bb4f7d
JA
3310}
3311
ff6165b2
JA
3312static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3313 const struct iovec *fast_iov,
227c0c96 3314 struct iov_iter *iter, bool force)
b7bb4f7d 3315{
e8c2bc1f 3316 if (!force && !io_op_defs[req->opcode].needs_async_data)
74566df3 3317 return 0;
e8c2bc1f
JA
3318 if (!req->async_data) {
3319 if (__io_alloc_async_data(req))
5d204bcf 3320 return -ENOMEM;
b7bb4f7d 3321
ff6165b2 3322 io_req_map_rw(req, iovec, fast_iov, iter);
5d204bcf 3323 }
b7bb4f7d 3324 return 0;
f67676d1
JA
3325}
3326
73debe68 3327static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
c3e330a4 3328{
e8c2bc1f 3329 struct io_async_rw *iorw = req->async_data;
f4bff104 3330 struct iovec *iov = iorw->fast_iov;
c3e330a4
PB
3331 ssize_t ret;
3332
73debe68 3333 ret = __io_import_iovec(rw, req, &iov, &iorw->iter, false);
c3e330a4
PB
3334 if (unlikely(ret < 0))
3335 return ret;
3336
ab0b196c
PB
3337 iorw->bytes_done = 0;
3338 iorw->free_iovec = iov;
3339 if (iov)
3340 req->flags |= REQ_F_NEED_CLEANUP;
c3e330a4
PB
3341 return 0;
3342}
3343
73debe68 3344static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1
JA
3345{
3346 ssize_t ret;
3347
a88fc400 3348 ret = io_prep_rw(req, sqe);
3529d8c2
JA
3349 if (ret)
3350 return ret;
f67676d1 3351
3529d8c2
JA
3352 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3353 return -EBADF;
f67676d1 3354
5f798bea 3355 /* either don't need iovec imported or already have it */
2d199895 3356 if (!req->async_data)
3529d8c2 3357 return 0;
73debe68 3358 return io_rw_prep_async(req, READ);
f67676d1
JA
3359}
3360
c1dd91d1
JA
3361/*
3362 * This is our waitqueue callback handler, registered through lock_page_async()
3363 * when the initial IO attempt with the iocb armed our waitqueue.
3364 * This gets called when the page is unlocked, and we generally expect that to
3365 * happen when the page IO is completed and the page is now uptodate. This will
3366 * queue a task_work based retry of the operation, attempting to copy the data
3367 * again. If the latter fails because the page was NOT uptodate, then we will
3368 * do a thread based blocking retry of the operation. That's the unexpected
3369 * slow path.
3370 */
bcf5a063
JA
3371static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3372 int sync, void *arg)
3373{
3374 struct wait_page_queue *wpq;
3375 struct io_kiocb *req = wait->private;
bcf5a063 3376 struct wait_page_key *key = arg;
bcf5a063
JA
3377 int ret;
3378
3379 wpq = container_of(wait, struct wait_page_queue, wait);
3380
cdc8fcb4
LT
3381 if (!wake_page_match(wpq, key))
3382 return 0;
3383
c8d317aa 3384 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
bcf5a063
JA
3385 list_del_init(&wait->entry);
3386
e7375122 3387 init_task_work(&req->task_work, io_req_task_submit);
6d816e08
JA
3388 percpu_ref_get(&req->ctx->refs);
3389
bcf5a063
JA
3390 /* submit ref gets dropped, acquire a new one */
3391 refcount_inc(&req->refs);
87c4311f 3392 ret = io_req_task_work_add(req, true);
bcf5a063 3393 if (unlikely(ret)) {
c2c4c83c
JA
3394 struct task_struct *tsk;
3395
bcf5a063 3396 /* queue just for cancelation */
e7375122 3397 init_task_work(&req->task_work, io_req_task_cancel);
bcf5a063 3398 tsk = io_wq_get_task(req->ctx->io_wq);
91989c70 3399 task_work_add(tsk, &req->task_work, TWA_NONE);
c2c4c83c 3400 wake_up_process(tsk);
bcf5a063 3401 }
bcf5a063
JA
3402 return 1;
3403}
3404
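/*
 * Minimal kernel-side sketch of the wakeup shape used above
 * (hypothetical names): a wait entry whose ->func detaches itself and
 * defers the real work, instead of running it in the waker's context.
 */
static int my_wake(struct wait_queue_entry *wait, unsigned mode,
		   int sync, void *key)
{
	list_del_init(&wait->entry);	/* one-shot: detach ourselves */
	/* hand off to task_work or a workqueue here, as io_uring does */
	return 1;
}

static void arm_wait_entry(struct wait_queue_entry *wait, void *private)
{
	init_waitqueue_func_entry(wait, my_wake);
	wait->private = private;
}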
c1dd91d1
JA
3405/*
3406 * This controls whether a given IO request should be armed for async page
3407 * based retry. If we return false here, the request is handed to the async
3408 * worker threads for retry. If we're doing buffered reads on a regular file,
3409 * we prepare a private wait_page_queue entry and retry the operation. This
3410 * will either succeed because the page is now uptodate and unlocked, or it
3411 * will register a callback when the page is unlocked at IO completion. Through
3412 * that callback, io_uring uses task_work to setup a retry of the operation.
3413 * That retry will attempt the buffered read again. The retry will generally
3414 * succeed, or in rare cases where it fails, we then fall back to using the
3415 * async worker threads for a blocking retry.
3416 */
227c0c96 3417static bool io_rw_should_retry(struct io_kiocb *req)
f67676d1 3418{
e8c2bc1f
JA
3419 struct io_async_rw *rw = req->async_data;
3420 struct wait_page_queue *wait = &rw->wpq;
bcf5a063 3421 struct kiocb *kiocb = &req->rw.kiocb;
f67676d1 3422
bcf5a063
JA
3423 /* never retry for NOWAIT, we just complete with -EAGAIN */
3424 if (req->flags & REQ_F_NOWAIT)
3425 return false;
f67676d1 3426
227c0c96 3427 /* Only for buffered IO */
3b2a4439 3428 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
bcf5a063 3429 return false;
3b2a4439 3430
bcf5a063
JA
3431 /*
3432 * just use poll if we can, and don't attempt if the fs doesn't
3433 * support callback based unlocks
3434 */
3435 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3436 return false;
f67676d1 3437
3b2a4439
JA
3438 wait->wait.func = io_async_buf_func;
3439 wait->wait.private = req;
3440 wait->wait.flags = 0;
3441 INIT_LIST_HEAD(&wait->wait.entry);
3442 kiocb->ki_flags |= IOCB_WAITQ;
c8d317aa 3443 kiocb->ki_flags &= ~IOCB_NOWAIT;
3b2a4439 3444 kiocb->ki_waitq = wait;
3b2a4439 3445 return true;
bcf5a063
JA
3446}
3447
3448static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3449{
3450 if (req->file->f_op->read_iter)
3451 return call_read_iter(req->file, &req->rw.kiocb, iter);
2dd2111d 3452 else if (req->file->f_op->read)
4017eb91 3453 return loop_rw_iter(READ, req, iter);
2dd2111d
GH
3454 else
3455 return -EINVAL;
f67676d1
JA
3456}
3457
static int io_read(struct io_kiocb *req, bool force_nonblock,
		   struct io_comp_state *cs)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter __iter, *iter = &__iter;
	struct io_async_rw *rw = req->async_data;
	ssize_t io_size, ret, ret2;
	size_t iov_count;
	bool no_async;

	if (rw)
		iter = &rw->iter;

	ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
	if (ret < 0)
		return ret;
	iov_count = iov_iter_count(iter);
	io_size = ret;
	req->result = io_size;
	ret = 0;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	else
		kiocb->ki_flags |= IOCB_NOWAIT;

	/* If the file doesn't support async, just async punt */
	no_async = force_nonblock && !io_file_supports_async(req->file, READ);
	if (no_async)
		goto copy_iov;

	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
	if (unlikely(ret))
		goto out_free;

	ret = io_iter_do_read(req, iter);

	if (!ret) {
		goto done;
	} else if (ret == -EIOCBQUEUED) {
		ret = 0;
		goto out_free;
	} else if (ret == -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK marked file */
		if (req->file->f_flags & O_NONBLOCK)
			goto done;
		/* some cases will consume bytes even on error returns */
		iov_iter_revert(iter, iov_count - iov_iter_count(iter));
		ret = 0;
		goto copy_iov;
	} else if (ret < 0) {
		/* make sure -ERESTARTSYS -> -EINTR is done */
		goto done;
	}

	/* read it all, or we did blocking attempt. no retry. */
	if (!iov_iter_count(iter) || !force_nonblock ||
	    (req->file->f_flags & O_NONBLOCK))
		goto done;

	io_size -= ret;
copy_iov:
	ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
	if (ret2) {
		ret = ret2;
		goto out_free;
	}
	if (no_async)
		return -EAGAIN;
	rw = req->async_data;
	/* it's copied and will be cleaned with ->io */
	iovec = NULL;
	/* now use our persistent iterator, if we aren't already */
	iter = &rw->iter;
retry:
	rw->bytes_done += ret;
	/* if we can retry, do so with the callbacks armed */
	if (!io_rw_should_retry(req)) {
		kiocb->ki_flags &= ~IOCB_WAITQ;
		return -EAGAIN;
	}

	/*
	 * Now retry read with the IOCB_WAITQ parts set in the iocb. If we
	 * get -EIOCBQUEUED, then we'll get a notification when the desired
	 * page gets unlocked. We can also get a partial read here, and if we
	 * do, then just retry at the new offset.
	 */
	ret = io_iter_do_read(req, iter);
	if (ret == -EIOCBQUEUED) {
		ret = 0;
		goto out_free;
	} else if (ret > 0 && ret < io_size) {
		/* we got some bytes, but not all. retry. */
		goto retry;
	}
done:
	kiocb_done(kiocb, ret, cs);
	ret = 0;
out_free:
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}

static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	ssize_t ret;

	ret = io_prep_rw(req, sqe);
	if (ret)
		return ret;

	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
		return -EBADF;

	/* either don't need iovec imported or already have it */
	if (!req->async_data)
		return 0;
	return io_rw_prep_async(req, WRITE);
}

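/*
 * Write path: unlike reads, buffered writes can't be retried via the
 * page waitqueue, so an -EAGAIN here always means an io-wq punt.
 * Buffered writes to regular files are punted up front, since the
 * non-direct IO path doesn't honour IOCB_NOWAIT (see below).
 */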
static int io_write(struct io_kiocb *req, bool force_nonblock,
		    struct io_comp_state *cs)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter __iter, *iter = &__iter;
	struct io_async_rw *rw = req->async_data;
	size_t iov_count;
	ssize_t ret, ret2, io_size;

	if (rw)
		iter = &rw->iter;

	ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
	if (ret < 0)
		return ret;
	iov_count = iov_iter_count(iter);
	io_size = ret;
	req->result = io_size;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	else
		kiocb->ki_flags |= IOCB_NOWAIT;

	/* If the file doesn't support async, just async punt */
	if (force_nonblock && !io_file_supports_async(req->file, WRITE))
		goto copy_iov;

	/* file path doesn't support NOWAIT for non-direct_IO */
	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
	    (req->flags & REQ_F_ISREG))
		goto copy_iov;

	ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count);
	if (unlikely(ret))
		goto out_free;

	/*
	 * Open-code file_start_write here to grab freeze protection,
	 * which will be released by another thread in
	 * io_complete_rw(). Fool lockdep by telling it the lock got
	 * released so that it doesn't complain about the held lock when
	 * we return to userspace.
	 */
	if (req->flags & REQ_F_ISREG) {
		sb_start_write(file_inode(req->file)->i_sb);
		__sb_writers_release(file_inode(req->file)->i_sb,
					SB_FREEZE_WRITE);
	}
	kiocb->ki_flags |= IOCB_WRITE;

	if (req->file->f_op->write_iter)
		ret2 = call_write_iter(req->file, kiocb, iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, req, iter);
	else
		ret2 = -EINVAL;

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK marked file */
	if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
			goto copy_iov;
done:
		kiocb_done(kiocb, ret2, cs);
	} else {
copy_iov:
		/* some cases will consume bytes even on error returns */
		iov_iter_revert(iter, iov_count - iov_iter_count(iter));
		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
		if (!ret)
			return -EAGAIN;
	}
out_free:
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}

static int io_renameat_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_rename *ren = &req->rename;
	const char __user *oldf, *newf;

	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	ren->old_dfd = READ_ONCE(sqe->fd);
	oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	ren->new_dfd = READ_ONCE(sqe->len);
	ren->flags = READ_ONCE(sqe->rename_flags);

	ren->oldpath = getname(oldf);
	if (IS_ERR(ren->oldpath))
		return PTR_ERR(ren->oldpath);

	ren->newpath = getname(newf);
	if (IS_ERR(ren->newpath)) {
		putname(ren->oldpath);
		return PTR_ERR(ren->newpath);
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_renameat(struct io_kiocb *req, bool force_nonblock)
{
	struct io_rename *ren = &req->rename;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
				ren->newpath, ren->flags);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_unlinkat_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_unlink *un = &req->unlink;
	const char __user *fname;

	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	un->dfd = READ_ONCE(sqe->fd);

	un->flags = READ_ONCE(sqe->unlink_flags);
	if (un->flags & ~AT_REMOVEDIR)
		return -EINVAL;

	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	un->filename = getname(fname);
	if (IS_ERR(un->filename))
		return PTR_ERR(un->filename);

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_unlinkat(struct io_kiocb *req, bool force_nonblock)
{
	struct io_unlink *un = &req->unlink;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	if (un->flags & AT_REMOVEDIR)
		ret = do_rmdir(un->dfd, un->filename);
	else
		ret = do_unlinkat(un->dfd, un->filename);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_shutdown_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
	    sqe->buf_index)
		return -EINVAL;

	req->shutdown.how = READ_ONCE(sqe->len);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_shutdown(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
	struct socket *sock;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	sock = sock_from_file(req->file, &ret);
	if (unlikely(!sock))
		return ret;

	ret = __sys_shutdown_sock(sock, req->shutdown.how);
	io_req_complete(req, ret);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

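/*
 * Splice/tee: the input file is looked up at prep time (it may be a
 * fixed file, per SPLICE_F_FD_IN_FIXED), and requests against
 * non-regular input files are marked IO_WQ_WORK_UNBOUND so they don't
 * tie up a bounded io-wq worker indefinitely.
 */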
static int __io_splice_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_splice *sp = &req->splice;
	unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sp->file_in = NULL;
	sp->len = READ_ONCE(sqe->len);
	sp->flags = READ_ONCE(sqe->splice_flags);

	if (unlikely(sp->flags & ~valid_flags))
		return -EINVAL;

	sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
				  (sp->flags & SPLICE_F_FD_IN_FIXED));
	if (!sp->file_in)
		return -EBADF;
	req->flags |= REQ_F_NEED_CLEANUP;

	if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
		/*
		 * Splice operations will be punted async, and here we need
		 * to modify io_wq_work.flags, so initialize io_wq_work first.
		 */
		io_req_init_async(req);
		req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	return 0;
}

static int io_tee_prep(struct io_kiocb *req,
		       const struct io_uring_sqe *sqe)
{
	if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
		return -EINVAL;
	return __io_splice_prep(req, sqe);
}

static int io_tee(struct io_kiocb *req, bool force_nonblock)
{
	struct io_splice *sp = &req->splice;
	struct file *in = sp->file_in;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	long ret = 0;

	if (force_nonblock)
		return -EAGAIN;
	if (sp->len)
		ret = do_tee(in, out, sp->len, flags);

	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (ret != sp->len)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_splice *sp = &req->splice;

	sp->off_in = READ_ONCE(sqe->splice_off_in);
	sp->off_out = READ_ONCE(sqe->off);
	return __io_splice_prep(req, sqe);
}

static int io_splice(struct io_kiocb *req, bool force_nonblock)
{
	struct io_splice *sp = &req->splice;
	struct file *in = sp->file_in;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	loff_t *poff_in, *poff_out;
	long ret = 0;

	if (force_nonblock)
		return -EAGAIN;

	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;

	if (sp->len)
		ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);

	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (ret != sp->len)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req, struct io_comp_state *cs)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	__io_req_complete(req, 0, 0, cs);
	return 0;
}

static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	return 0;
}

static int io_fsync(struct io_kiocb *req, bool force_nonblock)
{
	loff_t end = req->sync.off + req->sync.len;
	int ret;

	/* fsync always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;

	ret = vfs_fsync_range(req->file, req->sync.off,
				end > 0 ? end : LLONG_MAX,
				req->sync.flags & IORING_FSYNC_DATASYNC);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_fallocate_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->addr);
	req->sync.mode = READ_ONCE(sqe->len);
	return 0;
}

static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
{
	int ret;

	/* fallocate always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;
	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
				req->sync.len);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	int ret;

	if (unlikely(sqe->ioprio || sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should be already initialised */
	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
		req->open.how.flags |= O_LARGEFILE;

	req->open.dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}
	req->open.nofile = rlimit(RLIMIT_NOFILE);
	req->open.ignore_nonblock = false;
	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	u64 flags, mode;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	mode = READ_ONCE(sqe->len);
	flags = READ_ONCE(sqe->open_flags);
	req->open.how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct open_how __user *how;
	size_t len;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
				    len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

static int io_openat2(struct io_kiocb *req, bool force_nonblock)
{
	struct open_flags op;
	struct file *file;
	int ret;

	if (force_nonblock && !req->open.ignore_nonblock)
		return -EAGAIN;

	ret = build_open_flags(&req->open.how, &op);
	if (ret)
		goto err;

	ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
	if (ret < 0)
		goto err;

	file = do_filp_open(req->open.dfd, req->open.filename, &op);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
		/*
		 * A work-around to ensure that /proc/self works the way
		 * that it should - if we get -EOPNOTSUPP back, then assume
		 * that proc_self_get_link() failed us because we're in async
		 * context. We should be safe to retry this from the task
		 * itself with force_nonblock == false set, as it should not
		 * block on lookup. Would be nice to know this upfront and
		 * avoid the async dance, but doesn't seem feasible.
		 */
		if (ret == -EOPNOTSUPP && io_wq_current_is_worker()) {
			req->open.ignore_nonblock = true;
			refcount_inc(&req->refs);
			io_req_task_queue(req);
			return 0;
		}
	} else {
		fsnotify_open(file);
		fd_install(ret, file);
	}
err:
	putname(req->open.filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_openat(struct io_kiocb *req, bool force_nonblock)
{
	return io_openat2(req, force_nonblock);
}

static int io_remove_buffers_prep(struct io_kiocb *req,
				  const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
			       int bgid, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	/* the head kbuf is the list itself */
	while (!list_empty(&buf->list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&buf->list, struct io_buffer, list);
		list_del(&nxt->list);
		kfree(nxt);
		if (++i == nbufs)
			return i;
	}
	i++;
	kfree(buf);
	idr_remove(&ctx->io_buffer_idr, bgid);

	return i;
}

static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
			     struct io_comp_state *cs)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head;
	int ret = 0;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	ret = -ENOENT;
	head = idr_find(&ctx->io_buffer_idr, p->bgid);
	if (head)
		ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);

	io_ring_submit_unlock(ctx, !force_nonblock);
	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, ret, 0, cs);
	return 0;
}

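/*
 * IORING_OP_PROVIDE_BUFFERS: registers sqe->fd buffers of sqe->len
 * bytes each, starting at sqe->addr, with consecutive buffer IDs from
 * sqe->off, under the group ID in sqe->buf_group. The first buffer in
 * a group doubles as the idr entry; the rest are chained off its list
 * (see io_add_buffers() below).
 */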
static int io_provide_buffers_prep(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	p->bid = tmp;
	return 0;
}

static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf)
			break;

		buf->addr = addr;
		buf->len = pbuf->len;
		buf->bid = bid;
		addr += pbuf->len;
		bid++;
		if (!*head) {
			INIT_LIST_HEAD(&buf->list);
			*head = buf;
		} else {
			list_add_tail(&buf->list, &(*head)->list);
		}
	}

	return i ? i : -ENOMEM;
}

static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,
			      struct io_comp_state *cs)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head, *list;
	int ret = 0;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	list = head = idr_find(&ctx->io_buffer_idr, p->bgid);

	ret = io_add_buffers(p, &head);
	if (ret < 0)
		goto out;

	if (!list) {
		ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
					GFP_KERNEL);
		if (ret < 0) {
			__io_remove_buffers(ctx, head, p->bgid, -1U);
			goto out;
		}
	}
out:
	io_ring_submit_unlock(ctx, !force_nonblock);
	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, ret, 0, cs);
	return 0;
}

static int io_epoll_ctl_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_EPOLL)
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
		return -EINVAL;

	req->epoll.epfd = READ_ONCE(sqe->fd);
	req->epoll.op = READ_ONCE(sqe->len);
	req->epoll.fd = READ_ONCE(sqe->off);

	if (ep_op_has_event(req->epoll.op)) {
		struct epoll_event __user *ev;

		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
			return -EFAULT;
	}

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock,
			struct io_comp_state *cs)
{
#if defined(CONFIG_EPOLL)
	struct io_epoll *ie = &req->epoll;
	int ret;

	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;

	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, ret, 0, cs);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	if (sqe->ioprio || sqe->buf_index || sqe->off)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->madvise.addr = READ_ONCE(sqe->addr);
	req->madvise.len = READ_ONCE(sqe->len);
	req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	struct io_madvise *ma = &req->madvise;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->addr)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->fadvise.offset = READ_ONCE(sqe->off);
	req->fadvise.len = READ_ONCE(sqe->len);
	req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
}

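/*
 * POSIX_FADV_NORMAL/RANDOM/SEQUENTIAL only adjust readahead hints, so
 * they are safe to service without blocking; other advice values (e.g.
 * WILLNEED, DONTNEED) may touch the page cache and block, and are
 * punted to io-wq instead.
 */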
static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
{
	struct io_fadvise *fa = &req->fadvise;
	int ret;

	if (force_nonblock) {
		switch (fa->advice) {
		case POSIX_FADV_NORMAL:
		case POSIX_FADV_RANDOM:
		case POSIX_FADV_SEQUENTIAL:
			break;
		default:
			return -EAGAIN;
		}
	}

	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	req->statx.dfd = READ_ONCE(sqe->fd);
	req->statx.mask = READ_ONCE(sqe->len);
	req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	req->statx.flags = READ_ONCE(sqe->statx_flags);

	return 0;
}

static int io_statx(struct io_kiocb *req, bool force_nonblock)
{
	struct io_statx *ctx = &req->statx;
	int ret;

	if (force_nonblock) {
		/* only need file table for an actual valid fd */
		if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
			req->flags |= REQ_F_NO_FILE_TABLE;
		return -EAGAIN;
	}

	ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
		       ctx->buffer);

	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	/*
	 * If we queue this for async, it must not be cancellable. That would
	 * leave the 'file' in an indeterminate state, and we need to modify
	 * io_wq_work.flags here, so initialize io_wq_work first.
	 */
	io_req_init_async(req);
	req->work.flags |= IO_WQ_WORK_NO_CANCEL;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
	    sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	req->close.fd = READ_ONCE(sqe->fd);
	if (req->file && req->file->f_op == &io_uring_fops)
		return -EBADF;

	req->close.put_file = NULL;
	return 0;
}

static int io_close(struct io_kiocb *req, bool force_nonblock,
		    struct io_comp_state *cs)
{
	struct io_close *close = &req->close;
	int ret;

	/* might be already done during nonblock submission */
	if (!close->put_file) {
		ret = __close_fd_get_file(close->fd, &close->put_file);
		if (ret < 0)
			return (ret == -ENOENT) ? -EBADF : ret;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (close->put_file->f_op->flush && force_nonblock) {
		/* was never set, but play safe */
		req->flags &= ~REQ_F_NOWAIT;
		/* avoid grabbing files - we don't need the files */
		req->flags |= REQ_F_NO_FILE_TABLE;
		return -EAGAIN;
	}

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(close->put_file, req->work.identity->files);
	if (ret < 0)
		req_set_fail_links(req);
	fput(close->put_file);
	close->put_file = NULL;
	__io_req_complete(req, ret, 0, cs);
	return 0;
}

static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	req->sync.flags = READ_ONCE(sqe->sync_range_flags);
	return 0;
}

static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
{
	int ret;

	/* sync_file_range always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;

	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
				req->sync.flags);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

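/*
 * Networking opcodes: the real implementations follow when CONFIG_NET
 * is enabled; otherwise the #else branch further down provides
 * -EOPNOTSUPP stubs.
 */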
#if defined(CONFIG_NET)
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg)
{
	struct io_async_msghdr *async_msg = req->async_data;

	if (async_msg)
		return -EAGAIN;
	if (io_alloc_async_data(req)) {
		if (kmsg->iov != kmsg->fast_iov)
			kfree(kmsg->iov);
		return -ENOMEM;
	}
	async_msg = req->async_data;
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	return -EAGAIN;
}

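/*
 * The copy_hdr helpers below pull the user's msghdr (and iovec) into
 * kernel memory; if the request later has to be retried from io-wq,
 * io_setup_async_msg() above stashes that state in req->async_data so
 * the retry doesn't need to re-read userspace memory.
 */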
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->iov = iomsg->fast_iov;
	iomsg->msg.msg_name = &iomsg->addr;
	return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
				   req->sr_msg.msg_flags, &iomsg->iov);
}

static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_async_msghdr *async_msg = req->async_data;
	struct io_sr_msg *sr = &req->sr_msg;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif

	if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
		return 0;
	ret = io_sendmsg_copy_hdr(req, async_msg);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
		      struct io_comp_state *cs)
{
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret;

	sock = sock_from_file(req->file, &ret);
	if (unlikely(!sock))
		return ret;

	if (req->async_data) {
		kmsg = req->async_data;
		kmsg->msg.msg_name = &kmsg->addr;
		/* if iov is set, it's allocated already */
		if (!kmsg->iov)
			kmsg->iov = kmsg->fast_iov;
		kmsg->msg.msg_iter.iov = kmsg->iov;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	flags = req->sr_msg.msg_flags;
	if (flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	else if (force_nonblock)
		flags |= MSG_DONTWAIT;

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
	if (force_nonblock && ret == -EAGAIN)
		return io_setup_async_msg(req, kmsg);
	if (ret == -ERESTARTSYS)
		ret = -EINTR;

	if (kmsg->iov != kmsg->fast_iov)
		kfree(kmsg->iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, ret, 0, cs);
	return 0;
}

static int io_send(struct io_kiocb *req, bool force_nonblock,
		   struct io_comp_state *cs)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int ret;

	sock = sock_from_file(req->file, &ret);
	if (unlikely(!sock))
		return ret;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	flags = req->sr_msg.msg_flags;
	if (flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	else if (force_nonblock)
		flags |= MSG_DONTWAIT;

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;
	if (ret == -ERESTARTSYS)
		ret = -EINTR;

	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, ret, 0, cs);
	return 0;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct iovec __user *uiov;
	size_t iov_len;
	int ret;

	ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
				      &iomsg->uaddr, &uiov, &iov_len);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (iov_len > 1)
			return -EINVAL;
		if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov)))
			return -EFAULT;
		sr->len = iomsg->iov[0].iov_len;
		iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->iov, 1,
				sr->len);
		iomsg->iov = NULL;
	} else {
		ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
				     &iomsg->iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct compat_msghdr __user *msg_compat;
	struct io_sr_msg *sr = &req->sr_msg;
	struct compat_iovec __user *uiov;
	compat_uptr_t ptr;
	compat_size_t len;
	int ret;

	msg_compat = (struct compat_msghdr __user *) sr->umsg;
	ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
					&ptr, &len);
	if (ret)
		return ret;

	uiov = compat_ptr(ptr);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (len > 1)
			return -EINVAL;
		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;
		if (__get_user(clen, &uiov->iov_len))
			return -EFAULT;
		if (clen < 0)
			return -EINVAL;
		sr->len = clen;
		iomsg->iov = NULL;
	} else {
		ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
				     UIO_FASTIOV, &iomsg->iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->iov = iomsg->fast_iov;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

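/*
 * With IOSQE_BUFFER_SELECT on the recv side, at most a single iovec is
 * allowed: only its length is kept (in sr->len), and the actual buffer
 * is picked from the buffer group at execution time via
 * io_recv_buffer_select() below.
 */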
static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
					       bool needs_lock)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct io_buffer *kbuf;

	kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
	if (IS_ERR(kbuf))
		return kbuf;

	sr->kbuf = kbuf;
	req->flags |= REQ_F_BUFFER_SELECTED;
	return kbuf;
}

static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
{
	return io_put_kbuf(req, req->sr_msg.kbuf);
}

static int io_recvmsg_prep(struct io_kiocb *req,
			   const struct io_uring_sqe *sqe)
{
	struct io_async_msghdr *async_msg = req->async_data;
	struct io_sr_msg *sr = &req->sr_msg;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->bgid = READ_ONCE(sqe->buf_group);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif

	if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
		return 0;
	ret = io_recvmsg_copy_hdr(req, async_msg);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
		      struct io_comp_state *cs)
{
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	struct io_buffer *kbuf;
	unsigned flags;
	int ret, cflags = 0;

	sock = sock_from_file(req->file, &ret);
	if (unlikely(!sock))
		return ret;

	if (req->async_data) {
		kmsg = req->async_data;
		kmsg->msg.msg_name = &kmsg->addr;
		/* if iov is set, it's allocated already */
		if (!kmsg->iov)
			kmsg->iov = kmsg->fast_iov;
		kmsg->msg.msg_iter.iov = kmsg->iov;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (req->flags & REQ_F_BUFFER_SELECT) {
		kbuf = io_recv_buffer_select(req, !force_nonblock);
		if (IS_ERR(kbuf))
			return PTR_ERR(kbuf);
		kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
				1, req->sr_msg.len);
	}

	flags = req->sr_msg.msg_flags;
	if (flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	else if (force_nonblock)
		flags |= MSG_DONTWAIT;

	ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
				 kmsg->uaddr, flags);
	if (force_nonblock && ret == -EAGAIN)
		return io_setup_async_msg(req, kmsg);
	if (ret == -ERESTARTSYS)
		ret = -EINTR;

	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_recv_kbuf(req);
	if (kmsg->iov != kmsg->fast_iov)
		kfree(kmsg->iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, ret, cflags, cs);
	return 0;
}

static int io_recv(struct io_kiocb *req, bool force_nonblock,
		   struct io_comp_state *cs)
{
	struct io_buffer *kbuf;
	struct io_sr_msg *sr = &req->sr_msg;
	struct msghdr msg;
	void __user *buf = sr->buf;
	struct socket *sock;
	struct iovec iov;
	unsigned flags;
	int ret, cflags = 0;

	sock = sock_from_file(req->file, &ret);
	if (unlikely(!sock))
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		kbuf = io_recv_buffer_select(req, !force_nonblock);
		if (IS_ERR(kbuf))
			return PTR_ERR(kbuf);
		buf = u64_to_user_ptr(kbuf->addr);
	}

	ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_iocb = NULL;
	msg.msg_flags = 0;

	flags = req->sr_msg.msg_flags;
	if (flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	else if (force_nonblock)
		flags |= MSG_DONTWAIT;

	ret = sock_recvmsg(sock, &msg, flags);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out_free:
	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_recv_kbuf(req);
	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, ret, cflags, cs);
	return 0;
}

static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = &req->accept;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	return 0;
}

static int io_accept(struct io_kiocb *req, bool force_nonblock,
		     struct io_comp_state *cs)
{
	struct io_accept *accept = &req->accept;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	int ret;

	if (req->file->f_flags & O_NONBLOCK)
		req->flags |= REQ_F_NOWAIT;

	ret = __sys_accept4_file(req->file, file_flags, accept->addr,
					accept->addr_len, accept->flags,
					accept->nofile);
	if (ret == -EAGAIN && force_nonblock)
		return -EAGAIN;
	if (ret < 0) {
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail_links(req);
	}
	__io_req_complete(req, ret, 0, cs);
	return 0;
}

static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = &req->connect;
	struct io_async_connect *io = req->async_data;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);

	if (!io)
		return 0;

	return move_addr_to_kernel(conn->addr, conn->addr_len,
				   &io->address);
}

static int io_connect(struct io_kiocb *req, bool force_nonblock,
		      struct io_comp_state *cs)
{
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;

	if (req->async_data) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(req->connect.addr,
					  req->connect.addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 req->connect.addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req->async_data)
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		io = req->async_data;
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, ret, 0, cs);
	return 0;
}

#else /* !CONFIG_NET */
static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return -EOPNOTSUPP;
}

static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
		      struct io_comp_state *cs)
{
	return -EOPNOTSUPP;
}

static int io_send(struct io_kiocb *req, bool force_nonblock,
		   struct io_comp_state *cs)
{
	return -EOPNOTSUPP;
}

static int io_recvmsg_prep(struct io_kiocb *req,
			   const struct io_uring_sqe *sqe)
{
	return -EOPNOTSUPP;
}

static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
		      struct io_comp_state *cs)
{
	return -EOPNOTSUPP;
}

static int io_recv(struct io_kiocb *req, bool force_nonblock,
		   struct io_comp_state *cs)
{
	return -EOPNOTSUPP;
}

static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return -EOPNOTSUPP;
}

static int io_accept(struct io_kiocb *req, bool force_nonblock,
		     struct io_comp_state *cs)
{
	return -EOPNOTSUPP;
}

static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return -EOPNOTSUPP;
}

static int io_connect(struct io_kiocb *req, bool force_nonblock,
		      struct io_comp_state *cs)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_NET */

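/*
 * Poll handling below serves two users: explicit IORING_OP_POLL_ADD
 * requests, and the internal "async poll" (apoll) that arms a poll
 * handler for pollable files when a request would otherwise block.
 * Both share __io_arm_poll_handler(); they differ in where the poll
 * state lives and in the task_work completion function.
 */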
struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int error;
};

static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
			   __poll_t mask, task_work_func_t func)
{
	bool twa_signal_ok;
	int ret;

	/* for instances that support it check for an event match first: */
	if (mask && !(mask & poll->events))
		return 0;

	trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);

	list_del_init(&poll->wait.entry);

	req->result = mask;
	init_task_work(&req->task_work, func);
	percpu_ref_get(&req->ctx->refs);

	/*
	 * If we're using the signalfd wait_queue_head for this wakeup, then
	 * it's not safe to use TWA_SIGNAL as we could be recursing on the
	 * tsk->sighand->siglock on doing the wakeup. TWA_SIGNAL shouldn't
	 * be needed there either, as the normal wakeup will suffice.
	 */
	twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);

	/*
	 * If this fails, then the task is exiting. When a task exits, the
	 * work gets canceled, so just cancel this request as well instead
	 * of executing it. We can't safely execute it anyway, as we may not
	 * have the state needed for it.
	 */
	ret = io_req_task_work_add(req, twa_signal_ok);
	if (unlikely(ret)) {
		struct task_struct *tsk;

		WRITE_ONCE(poll->canceled, true);
		tsk = io_wq_get_task(req->ctx->io_wq);
		task_work_add(tsk, &req->task_work, TWA_NONE);
		wake_up_process(tsk);
	}
	return 1;
}

static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
	__acquires(&req->ctx->completion_lock)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->result && !READ_ONCE(poll->canceled)) {
		struct poll_table_struct pt = { ._key = poll->events };

		req->result = vfs_poll(req->file, &pt) & poll->events;
	}

	spin_lock_irq(&ctx->completion_lock);
	if (!req->result && !READ_ONCE(poll->canceled)) {
		add_wait_queue(poll->head, &poll->wait);
		return true;
	}

	return false;
}

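/*
 * A request can have two poll entries queued at once when the polled
 * file uses separate waitqueues for read and write readiness. For
 * IORING_OP_POLL_ADD the second entry is stashed in req->async_data;
 * for the internal async poll it lives in apoll->double_poll. The
 * helpers below pick the right one for each case.
 */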
static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return &req->poll;
	return &req->apoll->poll;
}

static void io_poll_remove_double(struct io_kiocb *req)
{
	struct io_poll_iocb *poll = io_poll_get_double(req);

	lockdep_assert_held(&req->ctx->completion_lock);

	if (poll && poll->head) {
		struct wait_queue_head *head = poll->head;

		spin_lock(&head->lock);
		list_del_init(&poll->wait.entry);
		if (poll->wait.private)
			refcount_dec(&req->refs);
		poll->head = NULL;
		spin_unlock(&head->lock);
	}
}

static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_poll_remove_double(req);
	req->poll.done = true;
	io_cqring_fill_event(req, error ? error : mangle_poll(mask));
	io_commit_cqring(ctx);
}

static void io_poll_task_func(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *nxt;

	if (io_poll_rewait(req, &req->poll)) {
		spin_unlock_irq(&ctx->completion_lock);
	} else {
		hash_del(&req->hash_node);
		io_poll_complete(req, req->result, 0);
		spin_unlock_irq(&ctx->completion_lock);

		nxt = io_put_req_find_next(req);
		io_cqring_ev_posted(ctx);
		if (nxt)
			__io_req_task_submit(nxt);
	}

	percpu_ref_put(&ctx->refs);
}

static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
			       int sync, void *key)
{
	struct io_kiocb *req = wait->private;
	struct io_poll_iocb *poll = io_poll_get_single(req);
	__poll_t mask = key_to_poll(key);

	/* for instances that support it check for an event match first: */
	if (mask && !(mask & poll->events))
		return 0;

	list_del_init(&wait->entry);

	if (poll && poll->head) {
		bool done;

		spin_lock(&poll->head->lock);
		done = list_empty(&poll->wait.entry);
		if (!done)
			list_del_init(&poll->wait.entry);
		/* make sure double remove sees this as being gone */
		wait->private = NULL;
		spin_unlock(&poll->head->lock);
		if (!done) {
			/* use the wait func handler, so it matches the wq type */
			poll->wait.func(&poll->wait, mode, sync, key);
		}
	}
	refcount_dec(&req->refs);
	return 1;
}

static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
			      wait_queue_func_t wake_func)
{
	poll->head = NULL;
	poll->done = false;
	poll->canceled = false;
	poll->events = events;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, wake_func);
}

static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll_iocb **poll_ptr)
{
	struct io_kiocb *req = pt->req;

	/*
	 * If poll->head is already set, it's because the file being polled
	 * uses multiple waitqueues for poll handling (eg one for read, one
	 * for write). Setup a separate io_poll_iocb if this happens.
	 */
	if (unlikely(poll->head)) {
		struct io_poll_iocb *poll_one = poll;

		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			pt->error = -EINVAL;
			return;
		}
		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}
		io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
		refcount_inc(&req->refs);
		poll->wait.private = req;
		*poll_ptr = poll;
	}

	pt->error = 0;
	poll->head = head;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
				struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

static void io_async_task_func(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
	struct async_poll *apoll = req->apoll;
	struct io_ring_ctx *ctx = req->ctx;

	trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);

	if (io_poll_rewait(req, &apoll->poll)) {
		spin_unlock_irq(&ctx->completion_lock);
		percpu_ref_put(&ctx->refs);
		return;
	}

	/* If req is still hashed, it cannot have been canceled. Don't check. */
	if (hash_hashed(&req->hash_node))
		hash_del(&req->hash_node);

	io_poll_remove_double(req);
	spin_unlock_irq(&ctx->completion_lock);

	if (!READ_ONCE(apoll->poll.canceled))
		__io_req_task_submit(req);
	else
		__io_req_task_cancel(req, -ECANCELED);

	percpu_ref_put(&ctx->refs);
	kfree(apoll->double_poll);
	kfree(apoll);
}

static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			 void *key)
{
	struct io_kiocb *req = wait->private;
	struct io_poll_iocb *poll = &req->apoll->poll;

	trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
					key_to_poll(key));

	return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
}

5334static void io_poll_req_insert(struct io_kiocb *req)
5335{
5336 struct io_ring_ctx *ctx = req->ctx;
5337 struct hlist_head *list;
5338
5339 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5340 hlist_add_head(&req->hash_node, list);
5341}

static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
				      struct io_poll_iocb *poll,
				      struct io_poll_table *ipt, __poll_t mask,
				      wait_queue_func_t wake_func)
	__acquires(&ctx->completion_lock)
{
	struct io_ring_ctx *ctx = req->ctx;
	bool cancel = false;

	INIT_HLIST_NODE(&req->hash_node);
	io_init_poll_iocb(poll, mask, wake_func);
	poll->file = req->file;
	poll->wait.private = req;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = -EINVAL;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	spin_lock_irq(&ctx->completion_lock);
	if (likely(poll->head)) {
		spin_lock(&poll->head->lock);
		if (unlikely(list_empty(&poll->wait.entry))) {
			if (ipt->error)
				cancel = true;
			ipt->error = 0;
			mask = 0;
		}
		if (mask || ipt->error)
			list_del_init(&poll->wait.entry);
		else if (cancel)
			WRITE_ONCE(poll->canceled, true);
		else if (!poll->done) /* actually waiting for an event */
			io_poll_req_insert(req);
		spin_unlock(&poll->head->lock);
	}

	return mask;
}

static bool io_arm_poll_handler(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask, ret;
	int rw;

	if (!req->file || !file_can_poll(req->file))
		return false;
	if (req->flags & REQ_F_POLLED)
		return false;
	if (def->pollin)
		rw = READ;
	else if (def->pollout)
		rw = WRITE;
	else
		return false;
	/* if we can't nonblock try, then no point in arming a poll handler */
	if (!io_file_supports_async(req->file, rw))
		return false;

	apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
	if (unlikely(!apoll))
		return false;
	apoll->double_poll = NULL;

	req->flags |= REQ_F_POLLED;
	req->apoll = apoll;

	mask = 0;
	if (def->pollin)
		mask |= POLLIN | POLLRDNORM;
	if (def->pollout)
		mask |= POLLOUT | POLLWRNORM;

	/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
	if ((req->opcode == IORING_OP_RECVMSG) &&
	    (req->sr_msg.msg_flags & MSG_ERRQUEUE))
		mask &= ~POLLIN;

	mask |= POLLERR | POLLPRI;

	ipt.pt._qproc = io_async_queue_proc;

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
					io_async_wake);
	if (ret || ipt.error) {
		io_poll_remove_double(req);
		spin_unlock_irq(&ctx->completion_lock);
		kfree(apoll->double_poll);
		kfree(apoll);
		return false;
	}
	spin_unlock_irq(&ctx->completion_lock);
	trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
					apoll->poll.events);
	return true;
}
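
/*
 * Worked example (mask values spelled out for illustration): IORING_OP_RECV
 * is marked pollin in io_op_defs, so the mask armed above is
 * POLLIN | POLLRDNORM | POLLERR | POLLPRI. If vfs_poll() reports none of
 * these, the request stays parked on the file's waitqueue and io_async_wake()
 * later resubmits it from task_work once the fd becomes readable, instead of
 * tying up an io-wq worker in a blocking receive.
 */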

static bool __io_poll_remove_one(struct io_kiocb *req,
				 struct io_poll_iocb *poll)
{
	bool do_complete = false;

	spin_lock(&poll->head->lock);
	WRITE_ONCE(poll->canceled, true);
	if (!list_empty(&poll->wait.entry)) {
		list_del_init(&poll->wait.entry);
		do_complete = true;
	}
	spin_unlock(&poll->head->lock);
	hash_del(&req->hash_node);
	return do_complete;
}

static bool io_poll_remove_one(struct io_kiocb *req)
{
	bool do_complete;

	io_poll_remove_double(req);

	if (req->opcode == IORING_OP_POLL_ADD) {
		do_complete = __io_poll_remove_one(req, &req->poll);
	} else {
		struct async_poll *apoll = req->apoll;

		/* non-poll requests have submit ref still */
		do_complete = __io_poll_remove_one(req, &apoll->poll);
		if (do_complete) {
			io_put_req(req);
			kfree(apoll->double_poll);
			kfree(apoll);
		}
	}

	if (do_complete) {
		io_cqring_fill_event(req, -ECANCELED);
		io_commit_cqring(req->ctx);
		req_set_fail_links(req);
		io_put_req_deferred(req, 1);
	}

	return do_complete;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	int posted = 0, i;

	spin_lock_irq(&ctx->completion_lock);
	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
		struct hlist_head *list;

		list = &ctx->cancel_hash[i];
		hlist_for_each_entry_safe(req, tmp, list, hash_node) {
			if (io_task_match(req, tsk))
				posted += io_poll_remove_one(req);
		}
	}
	spin_unlock_irq(&ctx->completion_lock);

	if (posted)
		io_cqring_ev_posted(ctx);

	return posted != 0;
}

static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
{
	struct hlist_head *list;
	struct io_kiocb *req;

	list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
	hlist_for_each_entry(req, list, hash_node) {
		if (sqe_addr != req->user_data)
			continue;
		if (io_poll_remove_one(req))
			return 0;
		return -EALREADY;
	}

	return -ENOENT;
}

static int io_poll_remove_prep(struct io_kiocb *req,
			       const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
	    sqe->poll_events)
		return -EINVAL;

	req->poll_remove.addr = READ_ONCE(sqe->addr);
	return 0;
}

/*
 * Find a running poll command that matches one specified in sqe->addr,
 * and remove it if found.
 */
static int io_poll_remove(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	spin_lock_irq(&ctx->completion_lock);
	ret = io_poll_cancel(ctx, req->poll_remove.addr);
	spin_unlock_irq(&ctx->completion_lock);

	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wait->private;
	struct io_poll_iocb *poll = &req->poll;

	return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);

	__io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
}

static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_iocb *poll = &req->poll;
	u32 events;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
		return -EINVAL;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
		       (events & EPOLLEXCLUSIVE);
	return 0;
}

static int io_poll_add(struct io_kiocb *req)
{
	struct io_poll_iocb *poll = &req->poll;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_poll_table ipt;
	__poll_t mask;

	ipt.pt._qproc = io_poll_queue_proc;

	mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
					io_poll_wake);

	if (mask) { /* no async, we'd stolen it */
		ipt.error = 0;
		io_poll_complete(req, mask, 0);
	}
	spin_unlock_irq(&ctx->completion_lock);

	if (mask) {
		io_cqring_ev_posted(ctx);
		io_put_req(req);
	}
	return ipt.error;
}
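
/*
 * Userspace usage sketch (liburing; the fd and user_data values are invented
 * for illustration):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	sqe->user_data = 0xcafe;
 *	io_uring_submit(&ring);
 *
 * A later IORING_OP_POLL_REMOVE whose sqe->addr is 0xcafe is routed through
 * io_poll_remove() -> io_poll_cancel() above and completes the original
 * request with -ECANCELED.
 */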

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	list_del_init(&req->timeout.list);
	atomic_set(&req->ctx->cq_timeouts,
		atomic_read(&req->ctx->cq_timeouts) + 1);

	io_cqring_fill_event(req, -ETIME);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
	req_set_fail_links(req);
	io_put_req(req);
	return HRTIMER_NORESTART;
}

static int __io_timeout_cancel(struct io_kiocb *req)
{
	struct io_timeout_data *io = req->async_data;
	int ret;

	ret = hrtimer_try_to_cancel(&io->timer);
	if (ret == -1)
		return -EALREADY;
	list_del_init(&req->timeout.list);

	req_set_fail_links(req);
	io_cqring_fill_event(req, -ECANCELED);
	io_put_req_deferred(req, 1);
	return 0;
}

static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
{
	struct io_kiocb *req;
	int ret = -ENOENT;

	list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
		if (user_data == req->user_data) {
			ret = 0;
			break;
		}
	}

	if (ret == -ENOENT)
		return ret;

	return __io_timeout_cancel(req);
}

static int io_timeout_remove_prep(struct io_kiocb *req,
				  const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags)
		return -EINVAL;

	req->timeout_rem.addr = READ_ONCE(sqe->addr);
	return 0;
}

/*
 * Remove or update an existing timeout command
 */
static int io_timeout_remove(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	spin_lock_irq(&ctx->completion_lock);
	ret = io_timeout_cancel(ctx, req->timeout_rem.addr);

	io_cqring_fill_event(req, ret);
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
}

static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			   bool is_timeout_link)
{
	struct io_timeout_data *data;
	unsigned flags;
	u32 off = READ_ONCE(sqe->off);

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
		return -EINVAL;
	if (off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~IORING_TIMEOUT_ABS)
		return -EINVAL;

	req->timeout.off = off;

	if (!req->async_data && io_alloc_async_data(req))
		return -ENOMEM;

	data = req->async_data;
	data->req = req;

	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	if (flags & IORING_TIMEOUT_ABS)
		data->mode = HRTIMER_MODE_ABS;
	else
		data->mode = HRTIMER_MODE_REL;

	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
	return 0;
}

static int io_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data = req->async_data;
	struct list_head *entry;
	u32 tail, off = req->timeout.off;

	spin_lock_irq(&ctx->completion_lock);

	/*
	 * sqe->off holds how many events that need to occur for this
	 * timeout event to be satisfied. If it isn't set, then this is
	 * a pure timeout request, sequence isn't used.
	 */
	if (io_is_timeout_noseq(req)) {
		entry = ctx->timeout_list.prev;
		goto add;
	}

	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
	req->timeout.target_seq = tail + off;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
						  timeout.list);

		if (io_is_timeout_noseq(nxt))
			continue;
		/* nxt.seq is behind @tail, otherwise would've been completed */
		if (off >= nxt->timeout.target_seq - tail)
			break;
	}
add:
	list_add(&req->timeout.list, entry);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
	spin_unlock_irq(&ctx->completion_lock);
	return 0;
}
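
/*
 * Worked example for the sequencing above (numbers invented): suppose
 * cached_cq_tail = 100 and cq_timeouts = 4, so tail = 96. A timeout with
 * sqe->off = 10 gets target_seq = 106 and fires either when its hrtimer
 * expires or once ten further non-timeout completions have advanced the
 * effective tail to 106, whichever happens first. The unsigned comparison
 * (off >= nxt->timeout.target_seq - tail) keeps the ordering correct even
 * when the u32 sequence space wraps.
 */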

static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	return req->user_data == (unsigned long) data;
}

static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;

	cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
				     struct io_kiocb *req, __u64 sqe_addr,
				     int success_ret)
{
	unsigned long flags;
	int ret;

	ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
	if (ret != -ENOENT) {
		spin_lock_irqsave(&ctx->completion_lock, flags);
		goto done;
	}

	spin_lock_irqsave(&ctx->completion_lock, flags);
	ret = io_timeout_cancel(ctx, sqe_addr);
	if (ret != -ENOENT)
		goto done;
	ret = io_poll_cancel(ctx, sqe_addr);
done:
	if (!ret)
		ret = success_ret;
	io_cqring_fill_event(req, ret);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);

	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
}

static int io_async_cancel_prep(struct io_kiocb *req,
				const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
		return -EINVAL;

	req->cancel.addr = READ_ONCE(sqe->addr);
	return 0;
}

static int io_async_cancel(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
	return 0;
}
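
/*
 * Userspace usage sketch (liburing; the key 0xcafe is invented, and liburing
 * of this era takes the key as a pointer-sized value):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_cancel(sqe, (void *) 0xcafe, 0);
 *	io_uring_submit(&ring);
 *
 * io_async_find_and_cancel() above then tries io-wq, the timeout list and
 * the poll hash in that order, and the cancel request completes with 0,
 * -EALREADY or -ENOENT accordingly.
 */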

static int io_files_update_prep(struct io_kiocb *req,
				const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
		return -EINVAL;
	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->ioprio || sqe->rw_flags)
		return -EINVAL;

	req->files_update.offset = READ_ONCE(sqe->off);
	req->files_update.nr_args = READ_ONCE(sqe->len);
	if (!req->files_update.nr_args)
		return -EINVAL;
	req->files_update.arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update(struct io_kiocb *req, bool force_nonblock,
			   struct io_comp_state *cs)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_files_update up;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	up.offset = req->files_update.offset;
	up.fds = req->files_update.arg;

	mutex_lock(&ctx->uring_lock);
	ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
	mutex_unlock(&ctx->uring_lock);

	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, ret, 0, cs);
	return 0;
}

static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	switch (req->opcode) {
	case IORING_OP_NOP:
		return 0;
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		return io_read_prep(req, sqe);
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		return io_write_prep(req, sqe);
	case IORING_OP_POLL_ADD:
		return io_poll_add_prep(req, sqe);
	case IORING_OP_POLL_REMOVE:
		return io_poll_remove_prep(req, sqe);
	case IORING_OP_FSYNC:
		return io_prep_fsync(req, sqe);
	case IORING_OP_SYNC_FILE_RANGE:
		return io_prep_sfr(req, sqe);
	case IORING_OP_SENDMSG:
	case IORING_OP_SEND:
		return io_sendmsg_prep(req, sqe);
	case IORING_OP_RECVMSG:
	case IORING_OP_RECV:
		return io_recvmsg_prep(req, sqe);
	case IORING_OP_CONNECT:
		return io_connect_prep(req, sqe);
	case IORING_OP_TIMEOUT:
		return io_timeout_prep(req, sqe, false);
	case IORING_OP_TIMEOUT_REMOVE:
		return io_timeout_remove_prep(req, sqe);
	case IORING_OP_ASYNC_CANCEL:
		return io_async_cancel_prep(req, sqe);
	case IORING_OP_LINK_TIMEOUT:
		return io_timeout_prep(req, sqe, true);
	case IORING_OP_ACCEPT:
		return io_accept_prep(req, sqe);
	case IORING_OP_FALLOCATE:
		return io_fallocate_prep(req, sqe);
	case IORING_OP_OPENAT:
		return io_openat_prep(req, sqe);
	case IORING_OP_CLOSE:
		return io_close_prep(req, sqe);
	case IORING_OP_FILES_UPDATE:
		return io_files_update_prep(req, sqe);
	case IORING_OP_STATX:
		return io_statx_prep(req, sqe);
	case IORING_OP_FADVISE:
		return io_fadvise_prep(req, sqe);
	case IORING_OP_MADVISE:
		return io_madvise_prep(req, sqe);
	case IORING_OP_OPENAT2:
		return io_openat2_prep(req, sqe);
	case IORING_OP_EPOLL_CTL:
		return io_epoll_ctl_prep(req, sqe);
	case IORING_OP_SPLICE:
		return io_splice_prep(req, sqe);
	case IORING_OP_PROVIDE_BUFFERS:
		return io_provide_buffers_prep(req, sqe);
	case IORING_OP_REMOVE_BUFFERS:
		return io_remove_buffers_prep(req, sqe);
	case IORING_OP_TEE:
		return io_tee_prep(req, sqe);
	case IORING_OP_SHUTDOWN:
		return io_shutdown_prep(req, sqe);
	case IORING_OP_RENAMEAT:
		return io_renameat_prep(req, sqe);
	case IORING_OP_UNLINKAT:
		return io_unlinkat_prep(req, sqe);
	}

	printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
			req->opcode);
	return -EINVAL;
}

static int io_req_defer_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	if (!sqe)
		return 0;
	if (io_alloc_async_data(req))
		return -EAGAIN;
	return io_req_prep(req, sqe);
}

static u32 io_get_sequence(struct io_kiocb *req)
{
	struct io_kiocb *pos;
	struct io_ring_ctx *ctx = req->ctx;
	u32 total_submitted, nr_reqs = 1;

	if (req->flags & REQ_F_LINK_HEAD)
		list_for_each_entry(pos, &req->link_list, link_list)
			nr_reqs++;

	total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
	return total_submitted - nr_reqs;
}
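
/*
 * Worked example (numbers invented): with cached_sq_head = 32 and
 * cached_sq_dropped = 2, a standalone request (nr_reqs = 1) gets sequence
 * 29, i.e. the count of valid SQEs submitted before it. A link head counted
 * together with two linked requests uses nr_reqs = 3, so the whole chain
 * shares one sequence number -- the value req_need_defer() compares against
 * completed events for IOSQE_IO_DRAIN.
 */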

static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_defer_entry *de;
	int ret;
	u32 seq;

	/* Still need defer if there is pending req in defer list. */
	if (likely(list_empty_careful(&ctx->defer_list) &&
		!(req->flags & REQ_F_IO_DRAIN)))
		return 0;

	seq = io_get_sequence(req);
	/* Still a chance to pass the sequence check */
	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
		return 0;

	if (!req->async_data) {
		ret = io_req_defer_prep(req, sqe);
		if (ret)
			return ret;
	}
	io_prep_async_link(req);
	de = kmalloc(sizeof(*de), GFP_KERNEL);
	if (!de)
		return -ENOMEM;

	spin_lock_irq(&ctx->completion_lock);
	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
		spin_unlock_irq(&ctx->completion_lock);
		kfree(de);
		io_queue_async_work(req);
		return -EIOCBQUEUED;
	}

	trace_io_uring_defer(ctx, req, req->user_data);
	de->req = req;
	de->seq = seq;
	list_add_tail(&de->list, &ctx->defer_list);
	spin_unlock_irq(&ctx->completion_lock);
	return -EIOCBQUEUED;
}

static void io_req_drop_files(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->inflight_lock, flags);
	list_del(&req->inflight_entry);
	if (waitqueue_active(&ctx->inflight_wait))
		wake_up(&ctx->inflight_wait);
	spin_unlock_irqrestore(&ctx->inflight_lock, flags);
	req->flags &= ~REQ_F_INFLIGHT;
	put_files_struct(req->work.identity->files);
	put_nsproxy(req->work.identity->nsproxy);
	req->work.flags &= ~IO_WQ_WORK_FILES;
}

static void __io_clean_op(struct io_kiocb *req)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		switch (req->opcode) {
		case IORING_OP_READV:
		case IORING_OP_READ_FIXED:
		case IORING_OP_READ:
			kfree((void *)(unsigned long)req->rw.addr);
			break;
		case IORING_OP_RECVMSG:
		case IORING_OP_RECV:
			kfree(req->sr_msg.kbuf);
			break;
		}
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	if (req->flags & REQ_F_NEED_CLEANUP) {
		switch (req->opcode) {
		case IORING_OP_READV:
		case IORING_OP_READ_FIXED:
		case IORING_OP_READ:
		case IORING_OP_WRITEV:
		case IORING_OP_WRITE_FIXED:
		case IORING_OP_WRITE: {
			struct io_async_rw *io = req->async_data;

			if (io->free_iovec)
				kfree(io->free_iovec);
			break;
			}
		case IORING_OP_RECVMSG:
		case IORING_OP_SENDMSG: {
			struct io_async_msghdr *io = req->async_data;

			if (io->iov != io->fast_iov)
				kfree(io->iov);
			break;
			}
		case IORING_OP_SPLICE:
		case IORING_OP_TEE:
			io_put_file(req, req->splice.file_in,
				    (req->splice.flags & SPLICE_F_FD_IN_FIXED));
			break;
		case IORING_OP_OPENAT:
		case IORING_OP_OPENAT2:
			if (req->open.filename)
				putname(req->open.filename);
			break;
		case IORING_OP_RENAMEAT:
			putname(req->rename.oldpath);
			putname(req->rename.newpath);
			break;
		case IORING_OP_UNLINKAT:
			putname(req->unlink.filename);
			break;
		}
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	if (req->flags & REQ_F_INFLIGHT)
		io_req_drop_files(req);
}

static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
			struct io_comp_state *cs)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	switch (req->opcode) {
	case IORING_OP_NOP:
		ret = io_nop(req, cs);
		break;
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		ret = io_read(req, force_nonblock, cs);
		break;
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		ret = io_write(req, force_nonblock, cs);
		break;
	case IORING_OP_FSYNC:
		ret = io_fsync(req, force_nonblock);
		break;
	case IORING_OP_POLL_ADD:
		ret = io_poll_add(req);
		break;
	case IORING_OP_POLL_REMOVE:
		ret = io_poll_remove(req);
		break;
	case IORING_OP_SYNC_FILE_RANGE:
		ret = io_sync_file_range(req, force_nonblock);
		break;
	case IORING_OP_SENDMSG:
		ret = io_sendmsg(req, force_nonblock, cs);
		break;
	case IORING_OP_SEND:
		ret = io_send(req, force_nonblock, cs);
		break;
	case IORING_OP_RECVMSG:
		ret = io_recvmsg(req, force_nonblock, cs);
		break;
	case IORING_OP_RECV:
		ret = io_recv(req, force_nonblock, cs);
		break;
	case IORING_OP_TIMEOUT:
		ret = io_timeout(req);
		break;
	case IORING_OP_TIMEOUT_REMOVE:
		ret = io_timeout_remove(req);
		break;
	case IORING_OP_ACCEPT:
		ret = io_accept(req, force_nonblock, cs);
		break;
	case IORING_OP_CONNECT:
		ret = io_connect(req, force_nonblock, cs);
		break;
	case IORING_OP_ASYNC_CANCEL:
		ret = io_async_cancel(req);
		break;
	case IORING_OP_FALLOCATE:
		ret = io_fallocate(req, force_nonblock);
		break;
	case IORING_OP_OPENAT:
		ret = io_openat(req, force_nonblock);
		break;
	case IORING_OP_CLOSE:
		ret = io_close(req, force_nonblock, cs);
		break;
	case IORING_OP_FILES_UPDATE:
		ret = io_files_update(req, force_nonblock, cs);
		break;
	case IORING_OP_STATX:
		ret = io_statx(req, force_nonblock);
		break;
	case IORING_OP_FADVISE:
		ret = io_fadvise(req, force_nonblock);
		break;
	case IORING_OP_MADVISE:
		ret = io_madvise(req, force_nonblock);
		break;
	case IORING_OP_OPENAT2:
		ret = io_openat2(req, force_nonblock);
		break;
	case IORING_OP_EPOLL_CTL:
		ret = io_epoll_ctl(req, force_nonblock, cs);
		break;
	case IORING_OP_SPLICE:
		ret = io_splice(req, force_nonblock);
		break;
	case IORING_OP_PROVIDE_BUFFERS:
		ret = io_provide_buffers(req, force_nonblock, cs);
		break;
	case IORING_OP_REMOVE_BUFFERS:
		ret = io_remove_buffers(req, force_nonblock, cs);
		break;
	case IORING_OP_TEE:
		ret = io_tee(req, force_nonblock);
		break;
	case IORING_OP_SHUTDOWN:
		ret = io_shutdown(req, force_nonblock);
		break;
	case IORING_OP_RENAMEAT:
		ret = io_renameat(req, force_nonblock);
		break;
	case IORING_OP_UNLINKAT:
		ret = io_unlinkat(req, force_nonblock);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	/* If the op doesn't have a file, we're not polling for it */
	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
		const bool in_async = io_wq_current_is_worker();

		/* workqueue context doesn't hold uring_lock, grab it now */
		if (in_async)
			mutex_lock(&ctx->uring_lock);

		io_iopoll_req_issued(req);

		if (in_async)
			mutex_unlock(&ctx->uring_lock);
	}

	return 0;
}
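
/*
 * Note on force_nonblock (summarising the two call sites): a punted request
 * sees io_issue_sqe() twice -- first from the submission path with
 * force_nonblock = true, then from io_wq_submit_work() below with
 * force_nonblock = false, where blocking is allowed. Handlers must therefore
 * return -EAGAIN from the first pass without side effects if they cannot
 * make progress without blocking.
 */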

static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_kiocb *timeout;
	int ret = 0;

	timeout = io_prep_linked_timeout(req);
	if (timeout)
		io_queue_linked_timeout(timeout);

	/* if NO_CANCEL is set, we must still run the work */
	if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
				IO_WQ_WORK_CANCEL) {
		ret = -ECANCELED;
	}

	if (!ret) {
		do {
			ret = io_issue_sqe(req, false, NULL);
			/*
			 * We can get EAGAIN for polled IO even though we're
			 * forcing a sync submission from here, since we can't
			 * wait for request slots on the block side.
			 */
			if (ret != -EAGAIN)
				break;
			cond_resched();
		} while (1);
	}

	if (ret) {
		req_set_fail_links(req);
		io_req_complete(req, ret);
	}

	return io_steal_work(req);
}

static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
					      int index)
{
	struct fixed_file_table *table;

	table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
	return table->files[index & IORING_FILE_TABLE_MASK];
}

static struct file *io_file_get(struct io_submit_state *state,
				struct io_kiocb *req, int fd, bool fixed)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file;

	if (fixed) {
		if (unlikely((unsigned int)fd >= ctx->nr_user_files))
			return NULL;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		file = io_file_from_index(ctx, fd);
		if (file) {
			req->fixed_file_refs = &ctx->file_data->node->refs;
			percpu_ref_get(req->fixed_file_refs);
		}
	} else {
		trace_io_uring_file_get(ctx, fd);
		file = __io_file_get(state, fd);
	}

	return file;
}

static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
			   int fd)
{
	req->file = io_file_get(state, req, fd, req->flags & REQ_F_FIXED_FILE);
	if (req->file || io_op_defs[req->opcode].needs_file_no_error)
		return 0;
	return -EBADF;
}

static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *prev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);

	/*
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
	if (!list_empty(&req->link_list)) {
		prev = list_entry(req->link_list.prev, struct io_kiocb,
				  link_list);
		if (refcount_inc_not_zero(&prev->refs))
			list_del_init(&req->link_list);
		else
			prev = NULL;
	}

	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	if (prev) {
		req_set_fail_links(prev);
		io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
		io_put_req(prev);
	} else {
		io_req_complete(req, -ETIME);
	}
	return HRTIMER_NORESTART;
}

static void __io_queue_linked_timeout(struct io_kiocb *req)
{
	/*
	 * If the list is now empty, then our linked request finished before
	 * we got a chance to setup the timer
	 */
	if (!list_empty(&req->link_list)) {
		struct io_timeout_data *data = req->async_data;

		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
				data->mode);
	}
}

static void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->completion_lock);
	__io_queue_linked_timeout(req);
	spin_unlock_irq(&ctx->completion_lock);

	/* drop submission reference */
	io_put_req(req);
}

static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	if (!(req->flags & REQ_F_LINK_HEAD))
		return NULL;
	if (req->flags & REQ_F_LINK_TIMEOUT)
		return NULL;

	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
					link_list);
	if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
		return NULL;

	nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
	req->flags |= REQ_F_LINK_TIMEOUT;
	return nxt;
}
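
/*
 * Userspace usage sketch (liburing; fd, buf and len are placeholders):
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_sqe *sqe;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, len, 0);
 *	sqe->flags |= IOSQE_IO_LINK;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_link_timeout(sqe, &ts, 0);
 *	io_uring_submit(&ring);
 *
 * If the read is still pending when the timer fires, io_link_timeout_fn()
 * above cancels it: the read completes with -ECANCELED and the timeout
 * request itself with -ETIME.
 */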

static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
{
	struct io_kiocb *linked_timeout;
	const struct cred *old_creds = NULL;
	int ret;

again:
	linked_timeout = io_prep_linked_timeout(req);

	if ((req->flags & REQ_F_WORK_INITIALIZED) &&
	    (req->work.flags & IO_WQ_WORK_CREDS) &&
	    req->work.identity->creds != current_cred()) {
		if (old_creds)
			revert_creds(old_creds);
		if (old_creds == req->work.identity->creds)
			old_creds = NULL; /* restored original creds */
		else
			old_creds = override_creds(req->work.identity->creds);
	}

	ret = io_issue_sqe(req, true, cs);

	/*
	 * We async punt it if the file wasn't marked NOWAIT, or if the file
	 * doesn't support non-blocking read/write attempts
	 */
	if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
		if (!io_arm_poll_handler(req)) {
			/*
			 * Queued up for async execution, worker will release
			 * submit reference when the iocb is actually submitted.
			 */
			io_queue_async_work(req);
		}

		if (linked_timeout)
			io_queue_linked_timeout(linked_timeout);
	} else if (likely(!ret)) {
		/* drop submission reference */
		req = io_put_req_find_next(req);
		if (linked_timeout)
			io_queue_linked_timeout(linked_timeout);

		if (req) {
			if (!(req->flags & REQ_F_FORCE_ASYNC))
				goto again;
			io_queue_async_work(req);
		}
	} else {
		/* un-prep timeout, so it'll be killed as any other linked */
		req->flags &= ~REQ_F_LINK_TIMEOUT;
		req_set_fail_links(req);
		io_put_req(req);
		io_req_complete(req, ret);
	}

	if (old_creds)
		revert_creds(old_creds);
}

static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			 struct io_comp_state *cs)
{
	int ret;

	ret = io_req_defer(req, sqe);
	if (ret) {
		if (ret != -EIOCBQUEUED) {
fail_req:
			req_set_fail_links(req);
			io_put_req(req);
			io_req_complete(req, ret);
		}
	} else if (req->flags & REQ_F_FORCE_ASYNC) {
		if (!req->async_data) {
			ret = io_req_defer_prep(req, sqe);
			if (unlikely(ret))
				goto fail_req;
		}
		io_queue_async_work(req);
	} else {
		if (sqe) {
			ret = io_req_prep(req, sqe);
			if (unlikely(ret))
				goto fail_req;
		}
		__io_queue_sqe(req, cs);
	}
}

static inline void io_queue_link_head(struct io_kiocb *req,
				      struct io_comp_state *cs)
{
	if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
		io_put_req(req);
		io_req_complete(req, -ECANCELED);
	} else
		io_queue_sqe(req, NULL, cs);
}

static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			 struct io_kiocb **link, struct io_comp_state *cs)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	/*
	 * If we already have a head request, queue this one for async
	 * submittal once the head completes. If we don't have a head but
	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
	 * submitted sync once the chain is complete. If none of those
	 * conditions are true (normal request), then just queue it.
	 */
	if (*link) {
		struct io_kiocb *head = *link;

		/*
		 * Taking sequential execution of a link, draining both sides
		 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
		 * requests in the link. So, it drains the head and the
		 * next after the link request. The last one is done via
		 * drain_next flag to persist the effect across calls.
		 */
		if (req->flags & REQ_F_IO_DRAIN) {
			head->flags |= REQ_F_IO_DRAIN;
			ctx->drain_next = 1;
		}
		ret = io_req_defer_prep(req, sqe);
		if (unlikely(ret)) {
			/* fail even hard links since we don't submit */
			head->flags |= REQ_F_FAIL_LINK;
			return ret;
		}
		trace_io_uring_link(ctx, req, head);
		list_add_tail(&req->link_list, &head->link_list);

		/* last request of a link, enqueue the link */
		if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
			io_queue_link_head(head, cs);
			*link = NULL;
		}
	} else {
		if (unlikely(ctx->drain_next)) {
			req->flags |= REQ_F_IO_DRAIN;
			ctx->drain_next = 0;
		}
		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
			req->flags |= REQ_F_LINK_HEAD;
			INIT_LIST_HEAD(&req->link_list);

			ret = io_req_defer_prep(req, sqe);
			if (unlikely(ret))
				req->flags |= REQ_F_FAIL_LINK;
			*link = req;
		} else {
			io_queue_sqe(req, sqe, cs);
		}
	}

	return 0;
}

/*
 * Batched submission is done, ensure local IO is flushed out.
 */
static void io_submit_state_end(struct io_submit_state *state)
{
	if (!list_empty(&state->comp.list))
		io_submit_flush_completions(&state->comp);
	blk_finish_plug(&state->plug);
	io_state_file_put(state);
	if (state->free_reqs)
		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
}

/*
 * Start submission side cache.
 */
static void io_submit_state_start(struct io_submit_state *state,
				  struct io_ring_ctx *ctx, unsigned int max_ios)
{
	blk_start_plug(&state->plug);
	state->comp.nr = 0;
	INIT_LIST_HEAD(&state->comp.list);
	state->comp.ctx = ctx;
	state->free_reqs = 0;
	state->file = NULL;
	state->ios_left = max_ios;
}

static void io_commit_sqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/*
	 * Ensure any loads from the SQEs are done at this point,
	 * since once we write the new head, the application could
	 * write new data to them.
	 */
	smp_store_release(&rings->sq.head, ctx->cached_sq_head);
}

/*
 * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
 * that is mapped by userspace. This means that care needs to be taken to
 * ensure that reads are stable, as we cannot rely on userspace always
 * being a good citizen. If members of the sqe are validated and then later
 * used, it's important that those reads are done through READ_ONCE() to
 * prevent a re-load down the line.
 */
static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
{
	u32 *sq_array = ctx->sq_array;
	unsigned head;

	/*
	 * The cached sq head (or cq tail) serves two purposes:
	 *
	 * 1) allows us to batch the cost of updating the user visible
	 *    head updates.
	 * 2) allows the kernel side to track the head on its own, even
	 *    though the application is the one updating it.
	 */
	head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
	if (likely(head < ctx->sq_entries))
		return &ctx->sq_sqes[head];

	/* drop invalid entries */
	ctx->cached_sq_dropped++;
	WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
	return NULL;
}
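
/*
 * Worked example (numbers invented): with sq_entries = 8 the ring mask is 7,
 * so cached_sq_head = 10 reads sq_array[2]. The value found there is itself
 * an application-controlled index into ctx->sq_sqes[], which is why it must
 * be range-checked before use; an out-of-range value is tallied in
 * sq_dropped rather than dereferenced.
 */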

static inline void io_consume_sqe(struct io_ring_ctx *ctx)
{
	ctx->cached_sq_head++;
}

/*
 * Check SQE restrictions (opcode and flags).
 *
 * Returns 'true' if SQE is allowed, 'false' otherwise.
 */
static inline bool io_check_restriction(struct io_ring_ctx *ctx,
					struct io_kiocb *req,
					unsigned int sqe_flags)
{
	if (!ctx->restricted)
		return true;

	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
		return false;

	if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
	    ctx->restrictions.sqe_flags_required)
		return false;

	if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
			  ctx->restrictions.sqe_flags_required))
		return false;

	return true;
}
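
/*
 * Worked example (flag values invented): a restricted ring registered with
 * sqe_flags_allowed = IOSQE_IO_LINK and sqe_flags_required = 0 rejects an
 * SQE carrying IOSQE_ASYNC, since IOSQE_ASYNC & ~(IOSQE_IO_LINK | 0) is
 * non-zero; io_init_req() below then fails the submission with -EACCES.
 */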

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK|	\
				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
				IOSQE_BUFFER_SELECT)

static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
		       const struct io_uring_sqe *sqe,
		       struct io_submit_state *state)
{
	unsigned int sqe_flags;
	int id, ret;

	req->opcode = READ_ONCE(sqe->opcode);
	req->user_data = READ_ONCE(sqe->user_data);
	req->async_data = NULL;
	req->file = NULL;
	req->ctx = ctx;
	req->flags = 0;
	/* one is dropped after submission, the other at completion */
	refcount_set(&req->refs, 2);
	req->task = current;
	req->result = 0;

	if (unlikely(req->opcode >= IORING_OP_LAST))
		return -EINVAL;

	if (unlikely(io_sq_thread_acquire_mm_files(ctx, req)))
		return -EFAULT;

	sqe_flags = READ_ONCE(sqe->flags);
	/* enforce forwards compatibility on users */
	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
		return -EINVAL;

	if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
		return -EACCES;

	if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
	    !io_op_defs[req->opcode].buffer_select)
		return -EOPNOTSUPP;

	id = READ_ONCE(sqe->personality);
	if (id) {
		struct io_identity *iod;

		iod = idr_find(&ctx->personality_idr, id);
		if (unlikely(!iod))
			return -EINVAL;
		refcount_inc(&iod->count);

		__io_req_init_async(req);
		get_cred(iod->creds);
		req->work.identity = iod;
		req->work.flags |= IO_WQ_WORK_CREDS;
	}

	/* same numerical values with corresponding REQ_F_*, safe to copy */
	req->flags |= sqe_flags;

	if (!io_op_defs[req->opcode].needs_file)
		return 0;

	ret = io_req_set_file(state, req, READ_ONCE(sqe->fd));
	state->ios_left--;
	return ret;
}

static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
{
	struct io_submit_state state;
	struct io_kiocb *link = NULL;
	int i, submitted = 0;

	/* if we have a backlog and couldn't flush it all, return BUSY */
	if (test_bit(0, &ctx->sq_check_overflow)) {
		if (!list_empty(&ctx->cq_overflow_list) &&
		    !io_cqring_overflow_flush(ctx, false, NULL, NULL))
			return -EBUSY;
	}

	/* make sure SQ entry isn't read before tail */
	nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));

	if (!percpu_ref_tryget_many(&ctx->refs, nr))
		return -EAGAIN;

	percpu_counter_add(&current->io_uring->inflight, nr);
	refcount_add(nr, &current->usage);

	io_submit_state_start(&state, ctx, nr);

	for (i = 0; i < nr; i++) {
		const struct io_uring_sqe *sqe;
		struct io_kiocb *req;
		int err;

		sqe = io_get_sqe(ctx);
		if (unlikely(!sqe)) {
			io_consume_sqe(ctx);
			break;
		}
		req = io_alloc_req(ctx, &state);
		if (unlikely(!req)) {
			if (!submitted)
				submitted = -EAGAIN;
			break;
		}
		io_consume_sqe(ctx);
		/* will complete beyond this point, count as submitted */
		submitted++;

		err = io_init_req(ctx, req, sqe, &state);
		if (unlikely(err)) {
fail_req:
			io_put_req(req);
			io_req_complete(req, err);
			break;
		}

		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
					  true, io_async_submit(ctx));
		err = io_submit_sqe(req, sqe, &link, &state.comp);
		if (err)
			goto fail_req;
	}

	if (unlikely(submitted != nr)) {
		int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
		struct io_uring_task *tctx = current->io_uring;
		int unused = nr - ref_used;

		percpu_ref_put_many(&ctx->refs, unused);
		percpu_counter_sub(&tctx->inflight, unused);
		put_task_struct_many(current, unused);
	}
	if (link)
		io_queue_link_head(link, &state.comp);
	io_submit_state_end(&state);

	/* Commit SQ ring head once we've consumed and submitted all SQEs */
	io_commit_sqring(ctx);

	return submitted;
}
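
/*
 * Accounting example (numbers invented): if nr = 8 ring refs, inflight
 * counts and task refs are taken up front but request allocation fails after
 * three SQEs, submitted = 3 and the unused five of each are handed back in
 * the unlikely branch above. Charging in bulk keeps the per-request fast
 * path free of counter updates.
 */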

static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
{
	/* Tell userspace we may need a wakeup call */
	spin_lock_irq(&ctx->completion_lock);
	ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
	spin_unlock_irq(&ctx->completion_lock);
}

static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
{
	spin_lock_irq(&ctx->completion_lock);
	ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
	spin_unlock_irq(&ctx->completion_lock);
}

static int io_sq_wake_function(struct wait_queue_entry *wqe, unsigned mode,
			       int sync, void *key)
{
	struct io_ring_ctx *ctx = container_of(wqe, struct io_ring_ctx, sqo_wait_entry);
	int ret;

	ret = autoremove_wake_function(wqe, mode, sync, key);
	if (ret) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->completion_lock, flags);
		ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
	}
	return ret;
}

enum sq_ret {
	SQT_IDLE	= 1,
	SQT_SPIN	= 2,
	SQT_DID_WORK	= 4,
};

static enum sq_ret __io_sq_thread(struct io_ring_ctx *ctx,
				  unsigned long start_jiffies, bool cap_entries)
{
	unsigned long timeout = start_jiffies + ctx->sq_thread_idle;
	struct io_sq_data *sqd = ctx->sq_data;
	unsigned int to_submit;
	int ret = 0;

again:
	if (!list_empty(&ctx->iopoll_list)) {
		unsigned nr_events = 0;

		mutex_lock(&ctx->uring_lock);
		if (!list_empty(&ctx->iopoll_list) && !need_resched())
			io_do_iopoll(ctx, &nr_events, 0);
		mutex_unlock(&ctx->uring_lock);
	}

	to_submit = io_sqring_entries(ctx);

	/*
	 * If submit got -EBUSY, flag us as needing the application
	 * to enter the kernel to reap and flush events.
	 */
	if (!to_submit || ret == -EBUSY || need_resched()) {
		/*
		 * Drop cur_mm before scheduling, we can't hold it for
		 * long periods (or over schedule()). Do this before
		 * adding ourselves to the waitqueue, as the unuse/drop
		 * may sleep.
		 */
		io_sq_thread_drop_mm_files();

		/*
		 * We're polling. If we're within the defined idle
		 * period, then let us spin without work before going
		 * to sleep. The exception is if we got EBUSY doing
		 * more IO, we should wait for the application to
		 * reap events and wake us up.
		 */
		if (!list_empty(&ctx->iopoll_list) || need_resched() ||
		    (!time_after(jiffies, timeout) && ret != -EBUSY &&
		    !percpu_ref_is_dying(&ctx->refs)))
			return SQT_SPIN;

		prepare_to_wait(&sqd->wait, &ctx->sqo_wait_entry,
					TASK_INTERRUPTIBLE);

		/*
		 * While doing polled IO, before going to sleep, we need
		 * to check if there are new reqs added to iopoll_list,
		 * it is because reqs may have been punted to io worker
		 * and will be added to iopoll_list later, hence check
		 * the iopoll_list again.
		 */
		if ((ctx->flags & IORING_SETUP_IOPOLL) &&
		    !list_empty_careful(&ctx->iopoll_list)) {
			finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
			goto again;
		}

		to_submit = io_sqring_entries(ctx);
		if (!to_submit || ret == -EBUSY)
			return SQT_IDLE;
	}

	finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
	io_ring_clear_wakeup_flag(ctx);

	/* if we're handling multiple rings, cap submit size for fairness */
	if (cap_entries && to_submit > 8)
		to_submit = 8;

	mutex_lock(&ctx->uring_lock);
	if (likely(!percpu_ref_is_dying(&ctx->refs)))
		ret = io_submit_sqes(ctx, to_submit);
	mutex_unlock(&ctx->uring_lock);

	if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
		wake_up(&ctx->sqo_sq_wait);

	return SQT_DID_WORK;
}
6c271ce2 6968
69fb2131
JA
6969static void io_sqd_init_new(struct io_sq_data *sqd)
6970{
6971 struct io_ring_ctx *ctx;
6972
6973 while (!list_empty(&sqd->ctx_new_list)) {
6974 ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list);
6975 init_wait(&ctx->sqo_wait_entry);
6976 ctx->sqo_wait_entry.func = io_sq_wake_function;
6977 list_move_tail(&ctx->sqd_list, &sqd->ctx_list);
6978 complete(&ctx->sq_thread_comp);
6979 }
6980}
6981
c8d1ba58
JA
6982static int io_sq_thread(void *data)
6983{
91d8f519 6984 struct cgroup_subsys_state *cur_css = NULL;
28cea78a
JA
6985 struct files_struct *old_files = current->files;
6986 struct nsproxy *old_nsproxy = current->nsproxy;
69fb2131
JA
6987 const struct cred *old_cred = NULL;
6988 struct io_sq_data *sqd = data;
6989 struct io_ring_ctx *ctx;
c8d1ba58 6990 unsigned long start_jiffies;
6c271ce2 6991
28cea78a
JA
6992 task_lock(current);
6993 current->files = NULL;
6994 current->nsproxy = NULL;
6995 task_unlock(current);
6996
69fb2131
JA
6997 start_jiffies = jiffies;
6998 while (!kthread_should_stop()) {
6999 enum sq_ret ret = 0;
e95eee2d 7000 bool cap_entries;
c1edbf5f
JA
7001
7002 /*
69fb2131
JA
7003 * Any changes to the sqd lists are synchronized through the
7004 * kthread parking. This synchronizes the thread vs users;
7005 * the users themselves are synchronized on the sqd->ctx_lock.
c1edbf5f 7006 */
69fb2131
JA
7007 if (kthread_should_park())
7008 kthread_parkme();
7143b5ac 7009
69fb2131
JA
7010 if (unlikely(!list_empty(&sqd->ctx_new_list)))
7011 io_sqd_init_new(sqd);
6c271ce2 7012
e95eee2d 7013 cap_entries = !list_is_singular(&sqd->ctx_list);
6c271ce2 7014
69fb2131
JA
7015 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
7016 if (current->cred != ctx->creds) {
7017 if (old_cred)
7018 revert_creds(old_cred);
7019 old_cred = override_creds(ctx->creds);
bdcd3eab 7020 }
91d8f519 7021 io_sq_thread_associate_blkcg(ctx, &cur_css);
4ea33a97
JA
7022#ifdef CONFIG_AUDIT
7023 current->loginuid = ctx->loginuid;
7024 current->sessionid = ctx->sessionid;
7025#endif
bdcd3eab 7026
e95eee2d 7027 ret |= __io_sq_thread(ctx, start_jiffies, cap_entries);
6c271ce2 7028
28cea78a 7029 io_sq_thread_drop_mm_files();
69fb2131 7030 }
6c271ce2 7031
69fb2131 7032 if (ret & SQT_SPIN) {
c8d1ba58
JA
7033 io_run_task_work();
7034 cond_resched();
69fb2131
JA
7035 } else if (ret == SQT_IDLE) {
7036 if (kthread_should_park())
6c271ce2 7037 continue;
69fb2131
JA
7038 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7039 io_ring_set_wakeup_flag(ctx);
7040 schedule();
7041 start_jiffies = jiffies;
7042 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7043 io_ring_clear_wakeup_flag(ctx);
6c271ce2 7044 }
6c271ce2
JA
7045 }
7046
4c6e277c 7047 io_run_task_work();
b41e9852 7048
91d8f519
DZ
7049 if (cur_css)
7050 io_sq_thread_unassociate_blkcg();
69fb2131
JA
7051 if (old_cred)
7052 revert_creds(old_cred);
06058632 7053
28cea78a
JA
7054 task_lock(current);
7055 current->files = old_files;
7056 current->nsproxy = old_nsproxy;
7057 task_unlock(current);
7058
2bbcd6d3 7059 kthread_parkme();
06058632 7060
6c271ce2
JA
7061 return 0;
7062}
7063
bda52162
JA
7064struct io_wait_queue {
7065 struct wait_queue_entry wq;
7066 struct io_ring_ctx *ctx;
7067 unsigned to_wait;
7068 unsigned nr_timeouts;
7069};
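/*
 * Private wait entry for CQ ring waits. io_wake_function() re-checks
 * io_should_wake() in the waker's context, so spurious wakeups are
 * rejected without the sleeping task ever being scheduled.
 */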
7070
1d7bb1d5 7071static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
bda52162
JA
7072{
7073 struct io_ring_ctx *ctx = iowq->ctx;
7074
7075 /*
d195a66e 7076 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
7077 * started waiting. For timeouts, we always want to return to userspace,
7078 * regardless of event count.
7079 */
1d7bb1d5 7080 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
bda52162
JA
7081 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
7082}
7083
7084static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
7085 int wake_flags, void *key)
7086{
7087 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
7088 wq);
7089
1d7bb1d5
JA
7090 /* use noflush == true, as we can't safely rely on locking context */
7091 if (!io_should_wake(iowq, true))
bda52162
JA
7092 return -1;
7093
7094 return autoremove_wake_function(curr, mode, wake_flags, key);
7095}
7096
af9c1a44
JA
7097static int io_run_task_work_sig(void)
7098{
7099 if (io_run_task_work())
7100 return 1;
7101 if (!signal_pending(current))
7102 return 0;
7103 if (current->jobctl & JOBCTL_TASK_WORK) {
7104 spin_lock_irq(&current->sighand->siglock);
7105 current->jobctl &= ~JOBCTL_TASK_WORK;
7106 recalc_sigpending();
7107 spin_unlock_irq(&current->sighand->siglock);
7108 return 1;
7109 }
7110 return -EINTR;
7111}
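/*
 * Return convention for io_run_task_work_sig() above: > 0 means task
 * work was run (retry the wait loop), 0 means nothing was pending, and
 * -EINTR means a real signal is pending and we should return to
 * userspace.
 */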
7112
2b188cc1
JA
7113/*
7114 * Wait until events become available, if we don't already have some. The
7115 * application must reap them itself, as they reside on the shared cq ring.
7116 */
7117static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
7118 const sigset_t __user *sig, size_t sigsz)
7119{
bda52162
JA
7120 struct io_wait_queue iowq = {
7121 .wq = {
7122 .private = current,
7123 .func = io_wake_function,
7124 .entry = LIST_HEAD_INIT(iowq.wq.entry),
7125 },
7126 .ctx = ctx,
7127 .to_wait = min_events,
7128 };
75b28aff 7129 struct io_rings *rings = ctx->rings;
e9ffa5c2 7130 int ret = 0;
2b188cc1 7131
b41e9852
JA
7132 do {
7133 if (io_cqring_events(ctx, false) >= min_events)
7134 return 0;
4c6e277c 7135 if (!io_run_task_work())
b41e9852 7136 break;
b41e9852 7137 } while (1);
2b188cc1
JA
7138
7139 if (sig) {
9e75ad5d
AB
7140#ifdef CONFIG_COMPAT
7141 if (in_compat_syscall())
7142 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 7143 sigsz);
9e75ad5d
AB
7144 else
7145#endif
b772434b 7146 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 7147
2b188cc1
JA
7148 if (ret)
7149 return ret;
7150 }
7151
bda52162 7152 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
c826bd7a 7153 trace_io_uring_cqring_wait(ctx, min_events);
bda52162
JA
7154 do {
7155 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
7156 TASK_INTERRUPTIBLE);
ce593a6c 7157 /* make sure we run task_work before checking for signals */
af9c1a44
JA
7158 ret = io_run_task_work_sig();
7159 if (ret > 0)
4c6e277c 7160 continue;
af9c1a44 7161 else if (ret < 0)
bda52162 7162 break;
ce593a6c
JA
7163 if (io_should_wake(&iowq, false))
7164 break;
7165 schedule();
bda52162
JA
7166 } while (1);
7167 finish_wait(&ctx->wait, &iowq.wq);
7168
b7db41c9 7169 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 7170
75b28aff 7171 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
7172}
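/*
 * Illustrative userspace counterpart (not part of this file): the
 * application typically ends up in io_cqring_wait() via
 *
 *	io_uring_enter(ring_fd, 0, min_events, IORING_ENTER_GETEVENTS,
 *		       sig, sigsz);
 *
 * and then reaps the completed CQEs directly off the mmap'ed CQ ring.
 */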
7173
6b06314c
JA
7174static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7175{
7176#if defined(CONFIG_UNIX)
7177 if (ctx->ring_sock) {
7178 struct sock *sock = ctx->ring_sock->sk;
7179 struct sk_buff *skb;
7180
7181 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7182 kfree_skb(skb);
7183 }
7184#else
7185 int i;
7186
65e19f54
JA
7187 for (i = 0; i < ctx->nr_user_files; i++) {
7188 struct file *file;
7189
7190 file = io_file_from_index(ctx, i);
7191 if (file)
7192 fput(file);
7193 }
6b06314c
JA
7194#endif
7195}
7196
05f3fb3c
JA
7197static void io_file_ref_kill(struct percpu_ref *ref)
7198{
7199 struct fixed_file_data *data;
7200
7201 data = container_of(ref, struct fixed_file_data, refs);
7202 complete(&data->done);
7203}
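/*
 * The complete() above pairs with the wait_for_completion(&data->done)
 * in io_sqe_files_unregister(), letting teardown wait for all file
 * references to be dropped.
 */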
7204
6b06314c
JA
7205static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7206{
05f3fb3c 7207 struct fixed_file_data *data = ctx->file_data;
05589553 7208 struct fixed_file_ref_node *ref_node = NULL;
65e19f54
JA
7209 unsigned nr_tables, i;
7210
05f3fb3c 7211 if (!data)
6b06314c
JA
7212 return -ENXIO;
7213
6a4d07cd 7214 spin_lock(&data->lock);
1e5d770b 7215 ref_node = data->node;
6a4d07cd 7216 spin_unlock(&data->lock);
05589553
XW
7217 if (ref_node)
7218 percpu_ref_kill(&ref_node->refs);
7219
7220 percpu_ref_kill(&data->refs);
7221
7222 /* wait for all refs nodes to complete */
4a38aed2 7223 flush_delayed_work(&ctx->file_put_work);
2faf852d 7224 wait_for_completion(&data->done);
05f3fb3c 7225
6b06314c 7226 __io_sqe_files_unregister(ctx);
65e19f54
JA
7227 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
7228 for (i = 0; i < nr_tables; i++)
05f3fb3c
JA
7229 kfree(data->table[i].files);
7230 kfree(data->table);
05589553
XW
7231 percpu_ref_exit(&data->refs);
7232 kfree(data);
05f3fb3c 7233 ctx->file_data = NULL;
6b06314c
JA
7234 ctx->nr_user_files = 0;
7235 return 0;
7236}
7237
534ca6d6 7238static void io_put_sq_data(struct io_sq_data *sqd)
6c271ce2 7239{
534ca6d6 7240 if (refcount_dec_and_test(&sqd->refs)) {
2bbcd6d3
RP
7241 /*
7242 * The park is a bit of a work-around; without it we get
7243 * warning spews on shutdown with SQPOLL set and affinity
7244 * set to a single CPU.
7245 */
534ca6d6
JA
7246 if (sqd->thread) {
7247 kthread_park(sqd->thread);
7248 kthread_stop(sqd->thread);
7249 }
7250
7251 kfree(sqd);
7252 }
7253}
7254
aa06165d
JA
7255static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7256{
7257 struct io_ring_ctx *ctx_attach;
7258 struct io_sq_data *sqd;
7259 struct fd f;
7260
7261 f = fdget(p->wq_fd);
7262 if (!f.file)
7263 return ERR_PTR(-ENXIO);
7264 if (f.file->f_op != &io_uring_fops) {
7265 fdput(f);
7266 return ERR_PTR(-EINVAL);
7267 }
7268
7269 ctx_attach = f.file->private_data;
7270 sqd = ctx_attach->sq_data;
7271 if (!sqd) {
7272 fdput(f);
7273 return ERR_PTR(-EINVAL);
7274 }
7275
7276 refcount_inc(&sqd->refs);
7277 fdput(f);
7278 return sqd;
7279}
7280
534ca6d6
JA
7281static struct io_sq_data *io_get_sq_data(struct io_uring_params *p)
7282{
7283 struct io_sq_data *sqd;
7284
aa06165d
JA
7285 if (p->flags & IORING_SETUP_ATTACH_WQ)
7286 return io_attach_sq_data(p);
7287
534ca6d6
JA
7288 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7289 if (!sqd)
7290 return ERR_PTR(-ENOMEM);
7291
7292 refcount_set(&sqd->refs, 1);
69fb2131
JA
7293 INIT_LIST_HEAD(&sqd->ctx_list);
7294 INIT_LIST_HEAD(&sqd->ctx_new_list);
7295 mutex_init(&sqd->ctx_lock);
7296 mutex_init(&sqd->lock);
534ca6d6
JA
7297 init_waitqueue_head(&sqd->wait);
7298 return sqd;
7299}
7300
69fb2131
JA
7301static void io_sq_thread_unpark(struct io_sq_data *sqd)
7302 __releases(&sqd->lock)
7303{
7304 if (!sqd->thread)
7305 return;
7306 kthread_unpark(sqd->thread);
7307 mutex_unlock(&sqd->lock);
7308}
7309
7310static void io_sq_thread_park(struct io_sq_data *sqd)
7311 __acquires(&sqd->lock)
7312{
7313 if (!sqd->thread)
7314 return;
7315 mutex_lock(&sqd->lock);
7316 kthread_park(sqd->thread);
7317}
7318
534ca6d6
JA
7319static void io_sq_thread_stop(struct io_ring_ctx *ctx)
7320{
7321 struct io_sq_data *sqd = ctx->sq_data;
7322
7323 if (sqd) {
7324 if (sqd->thread) {
7325 /*
7326 * We may arrive here from the error branch in
7327 * io_sq_offload_create() where the kthread is created
7328 * without being woken up, so wake it up now to make
7329 * sure the wait will complete.
7330 */
7331 wake_up_process(sqd->thread);
7332 wait_for_completion(&ctx->sq_thread_comp);
69fb2131
JA
7333
7334 io_sq_thread_park(sqd);
7335 }
7336
7337 mutex_lock(&sqd->ctx_lock);
7338 list_del(&ctx->sqd_list);
7339 mutex_unlock(&sqd->ctx_lock);
7340
7341 if (sqd->thread) {
7342 finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
7343 io_sq_thread_unpark(sqd);
534ca6d6
JA
7344 }
7345
7346 io_put_sq_data(sqd);
7347 ctx->sq_data = NULL;
6c271ce2
JA
7348 }
7349}
7350
6b06314c
JA
7351static void io_finish_async(struct io_ring_ctx *ctx)
7352{
6c271ce2
JA
7353 io_sq_thread_stop(ctx);
7354
561fb04a
JA
7355 if (ctx->io_wq) {
7356 io_wq_destroy(ctx->io_wq);
7357 ctx->io_wq = NULL;
6b06314c
JA
7358 }
7359}
7360
7361#if defined(CONFIG_UNIX)
6b06314c
JA
7362/*
7363 * Ensure the UNIX gc is aware of our file set, so we are certain that
7364 * the io_uring can be safely unregistered on process exit, even if we have
7365 * loops in the file referencing.
7366 */
7367static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7368{
7369 struct sock *sk = ctx->ring_sock->sk;
7370 struct scm_fp_list *fpl;
7371 struct sk_buff *skb;
08a45173 7372 int i, nr_files;
6b06314c 7373
6b06314c
JA
7374 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7375 if (!fpl)
7376 return -ENOMEM;
7377
7378 skb = alloc_skb(0, GFP_KERNEL);
7379 if (!skb) {
7380 kfree(fpl);
7381 return -ENOMEM;
7382 }
7383
7384 skb->sk = sk;
6b06314c 7385
08a45173 7386 nr_files = 0;
6b06314c
JA
7387 fpl->user = get_uid(ctx->user);
7388 for (i = 0; i < nr; i++) {
65e19f54
JA
7389 struct file *file = io_file_from_index(ctx, i + offset);
7390
7391 if (!file)
08a45173 7392 continue;
65e19f54 7393 fpl->fp[nr_files] = get_file(file);
08a45173
JA
7394 unix_inflight(fpl->user, fpl->fp[nr_files]);
7395 nr_files++;
6b06314c
JA
7396 }
7397
08a45173
JA
7398 if (nr_files) {
7399 fpl->max = SCM_MAX_FD;
7400 fpl->count = nr_files;
7401 UNIXCB(skb).fp = fpl;
05f3fb3c 7402 skb->destructor = unix_destruct_scm;
08a45173
JA
7403 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7404 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 7405
08a45173
JA
7406 for (i = 0; i < nr_files; i++)
7407 fput(fpl->fp[i]);
7408 } else {
7409 kfree_skb(skb);
7410 kfree(fpl);
7411 }
6b06314c
JA
7412
7413 return 0;
7414}
7415
7416/*
7417 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7418 * causes regular reference counting to break down. We rely on the UNIX
7419 * garbage collection to take care of this problem for us.
7420 */
7421static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7422{
7423 unsigned left, total;
7424 int ret = 0;
7425
7426 total = 0;
7427 left = ctx->nr_user_files;
7428 while (left) {
7429 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
7430
7431 ret = __io_sqe_files_scm(ctx, this_files, total);
7432 if (ret)
7433 break;
7434 left -= this_files;
7435 total += this_files;
7436 }
7437
7438 if (!ret)
7439 return 0;
7440
7441 while (total < ctx->nr_user_files) {
65e19f54
JA
7442 struct file *file = io_file_from_index(ctx, total);
7443
7444 if (file)
7445 fput(file);
6b06314c
JA
7446 total++;
7447 }
7448
7449 return ret;
7450}
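/*
 * Each skb queued by __io_sqe_files_scm() carries at most SCM_MAX_FD
 * files, so a large fixed-file set becomes a chain of SCM_RIGHTS skbs
 * on the ring socket's receive queue, all of them visible to the UNIX
 * gc.
 */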
7451#else
7452static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7453{
7454 return 0;
7455}
7456#endif
7457
5398ae69
PB
7458static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data,
7459 unsigned nr_tables, unsigned nr_files)
65e19f54
JA
7460{
7461 int i;
7462
7463 for (i = 0; i < nr_tables; i++) {
5398ae69 7464 struct fixed_file_table *table = &file_data->table[i];
65e19f54
JA
7465 unsigned this_files;
7466
7467 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
7468 table->files = kcalloc(this_files, sizeof(struct file *),
7469 GFP_KERNEL);
7470 if (!table->files)
7471 break;
7472 nr_files -= this_files;
7473 }
7474
7475 if (i == nr_tables)
7476 return 0;
7477
7478 for (i = 0; i < nr_tables; i++) {
5398ae69 7479 struct fixed_file_table *table = &file_data->table[i];
65e19f54
JA
7480 kfree(table->files);
7481 }
7482 return 1;
7483}
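/*
 * The tables allocated above form a two-level lookup: fixed file slot
 * i resolves to
 *
 *	table[i >> IORING_FILE_TABLE_SHIFT].files[i & IORING_FILE_TABLE_MASK]
 *
 * which bounds each kcalloc() at IORING_MAX_FILES_TABLE entries.
 */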
7484
05f3fb3c
JA
7485static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
7486{
7487#if defined(CONFIG_UNIX)
7488 struct sock *sock = ctx->ring_sock->sk;
7489 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7490 struct sk_buff *skb;
7491 int i;
7492
7493 __skb_queue_head_init(&list);
7494
7495 /*
7496 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7497 * remove this entry and rearrange the file array.
7498 */
7499 skb = skb_dequeue(head);
7500 while (skb) {
7501 struct scm_fp_list *fp;
7502
7503 fp = UNIXCB(skb).fp;
7504 for (i = 0; i < fp->count; i++) {
7505 int left;
7506
7507 if (fp->fp[i] != file)
7508 continue;
7509
7510 unix_notinflight(fp->user, fp->fp[i]);
7511 left = fp->count - 1 - i;
7512 if (left) {
7513 memmove(&fp->fp[i], &fp->fp[i + 1],
7514 left * sizeof(struct file *));
7515 }
7516 fp->count--;
7517 if (!fp->count) {
7518 kfree_skb(skb);
7519 skb = NULL;
7520 } else {
7521 __skb_queue_tail(&list, skb);
7522 }
7523 fput(file);
7524 file = NULL;
7525 break;
7526 }
7527
7528 if (!file)
7529 break;
7530
7531 __skb_queue_tail(&list, skb);
7532
7533 skb = skb_dequeue(head);
7534 }
7535
7536 if (skb_peek(&list)) {
7537 spin_lock_irq(&head->lock);
7538 while ((skb = __skb_dequeue(&list)) != NULL)
7539 __skb_queue_tail(head, skb);
7540 spin_unlock_irq(&head->lock);
7541 }
7542#else
7543 fput(file);
7544#endif
7545}
7546
7547struct io_file_put {
05589553 7548 struct list_head list;
05f3fb3c 7549 struct file *file;
05f3fb3c
JA
7550};
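/*
 * One deferred fput: io_queue_file_removal() links these on the
 * current ref node's file_list, and __io_file_put_work() drops the
 * files once that node's percpu ref hits zero.
 */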
7551
4a38aed2 7552static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
65e19f54 7553{
4a38aed2
JA
7554 struct fixed_file_data *file_data = ref_node->file_data;
7555 struct io_ring_ctx *ctx = file_data->ctx;
05f3fb3c 7556 struct io_file_put *pfile, *tmp;
05589553
XW
7557
7558 list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
6a4d07cd 7559 list_del(&pfile->list);
05589553
XW
7560 io_ring_file_put(ctx, pfile->file);
7561 kfree(pfile);
65e19f54 7562 }
05589553 7563
05589553
XW
7564 percpu_ref_exit(&ref_node->refs);
7565 kfree(ref_node);
7566 percpu_ref_put(&file_data->refs);
2faf852d 7567}
65e19f54 7568
4a38aed2
JA
7569static void io_file_put_work(struct work_struct *work)
7570{
7571 struct io_ring_ctx *ctx;
7572 struct llist_node *node;
7573
7574 ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
7575 node = llist_del_all(&ctx->file_put_llist);
7576
7577 while (node) {
7578 struct fixed_file_ref_node *ref_node;
7579 struct llist_node *next = node->next;
7580
7581 ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
7582 __io_file_put_work(ref_node);
7583 node = next;
7584 }
7585}
7586
05589553 7587static void io_file_data_ref_zero(struct percpu_ref *ref)
2faf852d 7588{
05589553 7589 struct fixed_file_ref_node *ref_node;
e297822b 7590 struct fixed_file_data *data;
4a38aed2 7591 struct io_ring_ctx *ctx;
e297822b 7592 bool first_add = false;
4a38aed2 7593 int delay = HZ;
65e19f54 7594
05589553 7595 ref_node = container_of(ref, struct fixed_file_ref_node, refs);
e297822b
PB
7596 data = ref_node->file_data;
7597 ctx = data->ctx;
7598
7599 spin_lock(&data->lock);
7600 ref_node->done = true;
7601
7602 while (!list_empty(&data->ref_list)) {
7603 ref_node = list_first_entry(&data->ref_list,
7604 struct fixed_file_ref_node, node);
7605 /* recycle ref nodes in order */
7606 if (!ref_node->done)
7607 break;
7608 list_del(&ref_node->node);
7609 first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
7610 }
7611 spin_unlock(&data->lock);
05589553 7612
e297822b 7613 if (percpu_ref_is_dying(&data->refs))
4a38aed2 7614 delay = 0;
05589553 7615
4a38aed2
JA
7616 if (!delay)
7617 mod_delayed_work(system_wq, &ctx->file_put_work, 0);
7618 else if (first_add)
7619 queue_delayed_work(system_wq, &ctx->file_put_work, delay);
05f3fb3c 7620}
65e19f54 7621
05589553
XW
7622static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
7623 struct io_ring_ctx *ctx)
05f3fb3c 7624{
05589553 7625 struct fixed_file_ref_node *ref_node;
05f3fb3c 7626
05589553
XW
7627 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7628 if (!ref_node)
7629 return ERR_PTR(-ENOMEM);
05f3fb3c 7630
05589553
XW
7631 if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
7632 0, GFP_KERNEL)) {
7633 kfree(ref_node);
7634 return ERR_PTR(-ENOMEM);
7635 }
7636 INIT_LIST_HEAD(&ref_node->node);
7637 INIT_LIST_HEAD(&ref_node->file_list);
05589553 7638 ref_node->file_data = ctx->file_data;
e297822b 7639 ref_node->done = false;
05589553 7640 return ref_node;
05589553
XW
7641}
7642
7643static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
7644{
7645 percpu_ref_exit(&ref_node->refs);
7646 kfree(ref_node);
65e19f54
JA
7647}
7648
6b06314c
JA
7649static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
7650 unsigned nr_args)
7651{
7652 __s32 __user *fds = (__s32 __user *) arg;
600cf3f8 7653 unsigned nr_tables, i;
05f3fb3c 7654 struct file *file;
600cf3f8 7655 int fd, ret = -ENOMEM;
05589553 7656 struct fixed_file_ref_node *ref_node;
5398ae69 7657 struct fixed_file_data *file_data;
6b06314c 7658
05f3fb3c 7659 if (ctx->file_data)
6b06314c
JA
7660 return -EBUSY;
7661 if (!nr_args)
7662 return -EINVAL;
7663 if (nr_args > IORING_MAX_FIXED_FILES)
7664 return -EMFILE;
7665
5398ae69
PB
7666 file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
7667 if (!file_data)
05f3fb3c 7668 return -ENOMEM;
5398ae69
PB
7669 file_data->ctx = ctx;
7670 init_completion(&file_data->done);
7671 INIT_LIST_HEAD(&file_data->ref_list);
7672 spin_lock_init(&file_data->lock);
05f3fb3c 7673
65e19f54 7674 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
035fbafc 7675 file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
5398ae69 7676 GFP_KERNEL);
600cf3f8
PB
7677 if (!file_data->table)
7678 goto out_free;
05f3fb3c 7679
5398ae69 7680 if (percpu_ref_init(&file_data->refs, io_file_ref_kill,
600cf3f8
PB
7681 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
7682 goto out_free;
6b06314c 7683
600cf3f8
PB
7684 if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
7685 goto out_ref;
55cbc256 7686 ctx->file_data = file_data;
65e19f54 7687
08a45173 7688 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
65e19f54
JA
7689 struct fixed_file_table *table;
7690 unsigned index;
7691
600cf3f8
PB
7692 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
7693 ret = -EFAULT;
7694 goto out_fput;
7695 }
08a45173 7696 /* allow sparse sets */
600cf3f8 7697 if (fd == -1)
08a45173 7698 continue;
6b06314c 7699
05f3fb3c 7700 file = fget(fd);
6b06314c 7701 ret = -EBADF;
05f3fb3c 7702 if (!file)
600cf3f8 7703 goto out_fput;
05f3fb3c 7704
6b06314c
JA
7705 /*
7706 * Don't allow io_uring instances to be registered. If UNIX
7707 * isn't enabled, then this causes a reference cycle and this
7708 * instance can never get freed. If UNIX is enabled we'll
7709 * handle it just fine, but there's still no point in allowing
7710 * a ring fd as it doesn't support regular read/write anyway.
7711 */
05f3fb3c
JA
7712 if (file->f_op == &io_uring_fops) {
7713 fput(file);
600cf3f8 7714 goto out_fput;
6b06314c 7715 }
600cf3f8
PB
7716 table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7717 index = i & IORING_FILE_TABLE_MASK;
05f3fb3c 7718 table->files[index] = file;
6b06314c
JA
7719 }
7720
6b06314c 7721 ret = io_sqe_files_scm(ctx);
05589553 7722 if (ret) {
6b06314c 7723 io_sqe_files_unregister(ctx);
05589553
XW
7724 return ret;
7725 }
6b06314c 7726
05589553
XW
7727 ref_node = alloc_fixed_file_ref_node(ctx);
7728 if (IS_ERR(ref_node)) {
7729 io_sqe_files_unregister(ctx);
7730 return PTR_ERR(ref_node);
7731 }
7732
b2e96852 7733 file_data->node = ref_node;
5398ae69 7734 spin_lock(&file_data->lock);
e297822b 7735 list_add_tail(&ref_node->node, &file_data->ref_list);
5398ae69
PB
7736 spin_unlock(&file_data->lock);
7737 percpu_ref_get(&file_data->refs);
6b06314c 7738 return ret;
600cf3f8
PB
7739out_fput:
7740 for (i = 0; i < ctx->nr_user_files; i++) {
7741 file = io_file_from_index(ctx, i);
7742 if (file)
7743 fput(file);
7744 }
7745 for (i = 0; i < nr_tables; i++)
7746 kfree(file_data->table[i].files);
7747 ctx->nr_user_files = 0;
7748out_ref:
7749 percpu_ref_exit(&file_data->refs);
7750out_free:
7751 kfree(file_data->table);
7752 kfree(file_data);
55cbc256 7753 ctx->file_data = NULL;
6b06314c
JA
7754 return ret;
7755}
7756
c3a31e60
JA
7757static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7758 int index)
7759{
7760#if defined(CONFIG_UNIX)
7761 struct sock *sock = ctx->ring_sock->sk;
7762 struct sk_buff_head *head = &sock->sk_receive_queue;
7763 struct sk_buff *skb;
7764
7765 /*
7766 * See if we can merge this file into an existing skb SCM_RIGHTS
7767 * file set. If there's no room, fall back to allocating a new skb
7768 * and filling it in.
7769 */
7770 spin_lock_irq(&head->lock);
7771 skb = skb_peek(head);
7772 if (skb) {
7773 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7774
7775 if (fpl->count < SCM_MAX_FD) {
7776 __skb_unlink(skb, head);
7777 spin_unlock_irq(&head->lock);
7778 fpl->fp[fpl->count] = get_file(file);
7779 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7780 fpl->count++;
7781 spin_lock_irq(&head->lock);
7782 __skb_queue_head(head, skb);
7783 } else {
7784 skb = NULL;
7785 }
7786 }
7787 spin_unlock_irq(&head->lock);
7788
7789 if (skb) {
7790 fput(file);
7791 return 0;
7792 }
7793
7794 return __io_sqe_files_scm(ctx, 1, index);
7795#else
7796 return 0;
7797#endif
7798}
7799
a5318d3c 7800static int io_queue_file_removal(struct fixed_file_data *data,
05589553 7801 struct file *file)
05f3fb3c 7802{
a5318d3c 7803 struct io_file_put *pfile;
b2e96852 7804 struct fixed_file_ref_node *ref_node = data->node;
05f3fb3c 7805
05f3fb3c 7806 pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
a5318d3c
HD
7807 if (!pfile)
7808 return -ENOMEM;
05f3fb3c
JA
7809
7810 pfile->file = file;
05589553
XW
7811 list_add(&pfile->list, &ref_node->file_list);
7812
a5318d3c 7813 return 0;
05f3fb3c
JA
7814}
7815
7816static int __io_sqe_files_update(struct io_ring_ctx *ctx,
7817 struct io_uring_files_update *up,
7818 unsigned nr_args)
7819{
7820 struct fixed_file_data *data = ctx->file_data;
05589553 7821 struct fixed_file_ref_node *ref_node;
05f3fb3c 7822 struct file *file;
c3a31e60
JA
7823 __s32 __user *fds;
7824 int fd, i, err;
7825 __u32 done;
05589553 7826 bool needs_switch = false;
c3a31e60 7827
05f3fb3c 7828 if (check_add_overflow(up->offset, nr_args, &done))
c3a31e60
JA
7829 return -EOVERFLOW;
7830 if (done > ctx->nr_user_files)
7831 return -EINVAL;
7832
05589553
XW
7833 ref_node = alloc_fixed_file_ref_node(ctx);
7834 if (IS_ERR(ref_node))
7835 return PTR_ERR(ref_node);
7836
c3a31e60 7837 done = 0;
05f3fb3c 7838 fds = u64_to_user_ptr(up->fds);
c3a31e60 7839 while (nr_args) {
65e19f54
JA
7840 struct fixed_file_table *table;
7841 unsigned index;
7842
c3a31e60
JA
7843 err = 0;
7844 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
7845 err = -EFAULT;
7846 break;
7847 }
05f3fb3c
JA
7848 i = array_index_nospec(up->offset, ctx->nr_user_files);
7849 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54
JA
7850 index = i & IORING_FILE_TABLE_MASK;
7851 if (table->files[index]) {
98dfd502 7852 file = table->files[index];
a5318d3c
HD
7853 err = io_queue_file_removal(data, file);
7854 if (err)
7855 break;
65e19f54 7856 table->files[index] = NULL;
05589553 7857 needs_switch = true;
c3a31e60
JA
7858 }
7859 if (fd != -1) {
c3a31e60
JA
7860 file = fget(fd);
7861 if (!file) {
7862 err = -EBADF;
7863 break;
7864 }
7865 /*
7866 * Don't allow io_uring instances to be registered. If
7867 * UNIX isn't enabled, then this causes a reference
7868 * cycle and this instance can never get freed. If UNIX
7869 * is enabled we'll handle it just fine, but there's
7870 * still no point in allowing a ring fd as it doesn't
7871 * support regular read/write anyway.
7872 */
7873 if (file->f_op == &io_uring_fops) {
7874 fput(file);
7875 err = -EBADF;
7876 break;
7877 }
65e19f54 7878 table->files[index] = file;
c3a31e60 7879 err = io_sqe_file_register(ctx, file, i);
f3bd9dae 7880 if (err) {
95d1c8e5 7881 table->files[index] = NULL;
f3bd9dae 7882 fput(file);
c3a31e60 7883 break;
f3bd9dae 7884 }
c3a31e60
JA
7885 }
7886 nr_args--;
7887 done++;
05f3fb3c
JA
7888 up->offset++;
7889 }
7890
05589553 7891 if (needs_switch) {
b2e96852 7892 percpu_ref_kill(&data->node->refs);
6a4d07cd 7893 spin_lock(&data->lock);
e297822b 7894 list_add_tail(&ref_node->node, &data->ref_list);
b2e96852 7895 data->node = ref_node;
6a4d07cd 7896 spin_unlock(&data->lock);
05589553
XW
7897 percpu_ref_get(&ctx->file_data->refs);
7898 } else
7899 destroy_fixed_file_ref_node(ref_node);
c3a31e60
JA
7900
7901 return done ? done : err;
7902}
05589553 7903
05f3fb3c
JA
7904static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
7905 unsigned nr_args)
7906{
7907 struct io_uring_files_update up;
7908
7909 if (!ctx->file_data)
7910 return -ENXIO;
7911 if (!nr_args)
7912 return -EINVAL;
7913 if (copy_from_user(&up, arg, sizeof(up)))
7914 return -EFAULT;
7915 if (up.resv)
7916 return -EINVAL;
7917
7918 return __io_sqe_files_update(ctx, &up, nr_args);
7919}
c3a31e60 7920
e9fd9396 7921static void io_free_work(struct io_wq_work *work)
7d723065
JA
7922{
7923 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7924
e9fd9396 7925 /* Consider that io_steal_work() relies on this ref */
7d723065
JA
7926 io_put_req(req);
7927}
7928
24369c2e
PB
7929static int io_init_wq_offload(struct io_ring_ctx *ctx,
7930 struct io_uring_params *p)
7931{
7932 struct io_wq_data data;
7933 struct fd f;
7934 struct io_ring_ctx *ctx_attach;
7935 unsigned int concurrency;
7936 int ret = 0;
7937
7938 data.user = ctx->user;
e9fd9396 7939 data.free_work = io_free_work;
f5fa38c5 7940 data.do_work = io_wq_submit_work;
24369c2e
PB
7941
7942 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
7943 /* Do QD, or 4 * CPUS, whichever is smaller */
7944 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
7945
7946 ctx->io_wq = io_wq_create(concurrency, &data);
7947 if (IS_ERR(ctx->io_wq)) {
7948 ret = PTR_ERR(ctx->io_wq);
7949 ctx->io_wq = NULL;
7950 }
7951 return ret;
7952 }
7953
7954 f = fdget(p->wq_fd);
7955 if (!f.file)
7956 return -EBADF;
7957
7958 if (f.file->f_op != &io_uring_fops) {
7959 ret = -EINVAL;
7960 goto out_fput;
7961 }
7962
7963 ctx_attach = f.file->private_data;
7964 /* @io_wq is protected by holding the fd */
7965 if (!io_wq_get(ctx_attach->io_wq, &data)) {
7966 ret = -EINVAL;
7967 goto out_fput;
7968 }
7969
7970 ctx->io_wq = ctx_attach->io_wq;
7971out_fput:
7972 fdput(f);
7973 return ret;
7974}
7975
0f212204
JA
7976static int io_uring_alloc_task_context(struct task_struct *task)
7977{
7978 struct io_uring_task *tctx;
d8a6df10 7979 int ret;
0f212204
JA
7980
7981 tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
7982 if (unlikely(!tctx))
7983 return -ENOMEM;
7984
d8a6df10
JA
7985 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7986 if (unlikely(ret)) {
7987 kfree(tctx);
7988 return ret;
7989 }
7990
0f212204
JA
7991 xa_init(&tctx->xa);
7992 init_waitqueue_head(&tctx->wait);
7993 tctx->last = NULL;
fdaf083c
JA
7994 atomic_set(&tctx->in_idle, 0);
7995 tctx->sqpoll = false;
500a373d
JA
7996 io_init_identity(&tctx->__identity);
7997 tctx->identity = &tctx->__identity;
0f212204
JA
7998 task->io_uring = tctx;
7999 return 0;
8000}
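/*
 * tctx->xa maps each io_uring file this task has used back to the file
 * itself, so exit/exec can walk every ring the task touched, while
 * tctx->inflight counts outstanding requests for the cancel/idle wait
 * in __io_uring_files_cancel().
 */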
8001
8002void __io_uring_free(struct task_struct *tsk)
8003{
8004 struct io_uring_task *tctx = tsk->io_uring;
8005
8006 WARN_ON_ONCE(!xa_empty(&tctx->xa));
500a373d
JA
8007 WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1);
8008 if (tctx->identity != &tctx->__identity)
8009 kfree(tctx->identity);
d8a6df10 8010 percpu_counter_destroy(&tctx->inflight);
0f212204
JA
8011 kfree(tctx);
8012 tsk->io_uring = NULL;
8013}
8014
7e84e1c7
SG
8015static int io_sq_offload_create(struct io_ring_ctx *ctx,
8016 struct io_uring_params *p)
2b188cc1
JA
8017{
8018 int ret;
8019
6c271ce2 8020 if (ctx->flags & IORING_SETUP_SQPOLL) {
534ca6d6
JA
8021 struct io_sq_data *sqd;
8022
3ec482d1 8023 ret = -EPERM;
ce59fc69 8024 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
3ec482d1
JA
8025 goto err;
8026
534ca6d6
JA
8027 sqd = io_get_sq_data(p);
8028 if (IS_ERR(sqd)) {
8029 ret = PTR_ERR(sqd);
8030 goto err;
8031 }
69fb2131 8032
534ca6d6 8033 ctx->sq_data = sqd;
69fb2131
JA
8034 io_sq_thread_park(sqd);
8035 mutex_lock(&sqd->ctx_lock);
8036 list_add(&ctx->sqd_list, &sqd->ctx_new_list);
8037 mutex_unlock(&sqd->ctx_lock);
8038 io_sq_thread_unpark(sqd);
534ca6d6 8039
917257da
JA
8040 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8041 if (!ctx->sq_thread_idle)
8042 ctx->sq_thread_idle = HZ;
8043
aa06165d
JA
8044 if (sqd->thread)
8045 goto done;
8046
6c271ce2 8047 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 8048 int cpu = p->sq_thread_cpu;
6c271ce2 8049
917257da 8050 ret = -EINVAL;
44a9bd18
JA
8051 if (cpu >= nr_cpu_ids)
8052 goto err;
7889f44d 8053 if (!cpu_online(cpu))
917257da
JA
8054 goto err;
8055
69fb2131 8056 sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd,
534ca6d6 8057 cpu, "io_uring-sq");
6c271ce2 8058 } else {
69fb2131 8059 sqd->thread = kthread_create(io_sq_thread, sqd,
6c271ce2
JA
8060 "io_uring-sq");
8061 }
534ca6d6
JA
8062 if (IS_ERR(sqd->thread)) {
8063 ret = PTR_ERR(sqd->thread);
8064 sqd->thread = NULL;
6c271ce2
JA
8065 goto err;
8066 }
534ca6d6 8067 ret = io_uring_alloc_task_context(sqd->thread);
0f212204
JA
8068 if (ret)
8069 goto err;
6c271ce2
JA
8070 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8071 /* Can't have SQ_AFF without SQPOLL */
8072 ret = -EINVAL;
8073 goto err;
8074 }
8075
aa06165d 8076done:
24369c2e
PB
8077 ret = io_init_wq_offload(ctx, p);
8078 if (ret)
2b188cc1 8079 goto err;
2b188cc1
JA
8080
8081 return 0;
8082err:
54a91f3b 8083 io_finish_async(ctx);
2b188cc1
JA
8084 return ret;
8085}
8086
7e84e1c7
SG
8087static void io_sq_offload_start(struct io_ring_ctx *ctx)
8088{
534ca6d6
JA
8089 struct io_sq_data *sqd = ctx->sq_data;
8090
8091 if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd->thread)
8092 wake_up_process(sqd->thread);
7e84e1c7
SG
8093}
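/*
 * kthread_create() in io_sq_offload_create() leaves the SQPOLL thread
 * stopped; it only starts consuming SQEs once it is woken up here,
 * after the rest of ring setup has completed.
 */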
8094
a087e2b5
BM
8095static inline void __io_unaccount_mem(struct user_struct *user,
8096 unsigned long nr_pages)
2b188cc1
JA
8097{
8098 atomic_long_sub(nr_pages, &user->locked_vm);
8099}
8100
a087e2b5
BM
8101static inline int __io_account_mem(struct user_struct *user,
8102 unsigned long nr_pages)
2b188cc1
JA
8103{
8104 unsigned long page_limit, cur_pages, new_pages;
8105
8106 /* Don't allow more pages than we can safely lock */
8107 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8108
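/*
 * Lock-free accounting: if another task changed locked_vm between
 * the read and the cmpxchg below, retry with the updated value.
 */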
8109 do {
8110 cur_pages = atomic_long_read(&user->locked_vm);
8111 new_pages = cur_pages + nr_pages;
8112 if (new_pages > page_limit)
8113 return -ENOMEM;
8114 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8115 new_pages) != cur_pages);
8116
8117 return 0;
8118}
8119
2e0464d4
BM
8120static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
8121 enum io_mem_account acct)
a087e2b5 8122{
aad5d8da 8123 if (ctx->limit_mem)
a087e2b5 8124 __io_unaccount_mem(ctx->user, nr_pages);
30975825 8125
2aede0e4 8126 if (ctx->mm_account) {
2e0464d4 8127 if (acct == ACCT_LOCKED)
2aede0e4 8128 ctx->mm_account->locked_vm -= nr_pages;
2e0464d4 8129 else if (acct == ACCT_PINNED)
2aede0e4 8130 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
2e0464d4 8131 }
a087e2b5
BM
8132}
8133
2e0464d4
BM
8134static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
8135 enum io_mem_account acct)
a087e2b5 8136{
30975825
BM
8137 int ret;
8138
8139 if (ctx->limit_mem) {
8140 ret = __io_account_mem(ctx->user, nr_pages);
8141 if (ret)
8142 return ret;
8143 }
8144
2aede0e4 8145 if (ctx->mm_account) {
2e0464d4 8146 if (acct == ACCT_LOCKED)
2aede0e4 8147 ctx->mm_account->locked_vm += nr_pages;
2e0464d4 8148 else if (acct == ACCT_PINNED)
2aede0e4 8149 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
2e0464d4 8150 }
a087e2b5
BM
8151
8152 return 0;
8153}
8154
2b188cc1
JA
8155static void io_mem_free(void *ptr)
8156{
52e04ef4
MR
8157 struct page *page;
8158
8159 if (!ptr)
8160 return;
2b188cc1 8161
52e04ef4 8162 page = virt_to_head_page(ptr);
2b188cc1
JA
8163 if (put_page_testzero(page))
8164 free_compound_page(page);
8165}
8166
8167static void *io_mem_alloc(size_t size)
8168{
8169 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
8170 __GFP_NORETRY;
8171
8172 return (void *) __get_free_pages(gfp_flags, get_order(size));
8173}
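/*
 * __GFP_COMP gives a compound page, which is what lets io_mem_free()
 * drop the whole allocation via put_page_testzero() on the head page,
 * and allows the ring memory to be mmap'ed by the application.
 */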
8174
75b28aff
HV
8175static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8176 size_t *sq_offset)
8177{
8178 struct io_rings *rings;
8179 size_t off, sq_array_size;
8180
8181 off = struct_size(rings, cqes, cq_entries);
8182 if (off == SIZE_MAX)
8183 return SIZE_MAX;
8184
8185#ifdef CONFIG_SMP
8186 off = ALIGN(off, SMP_CACHE_BYTES);
8187 if (off == 0)
8188 return SIZE_MAX;
8189#endif
8190
b36200f5
DV
8191 if (sq_offset)
8192 *sq_offset = off;
8193
75b28aff
HV
8194 sq_array_size = array_size(sizeof(u32), sq_entries);
8195 if (sq_array_size == SIZE_MAX)
8196 return SIZE_MAX;
8197
8198 if (check_add_overflow(off, sq_array_size, &off))
8199 return SIZE_MAX;
8200
75b28aff
HV
8201 return off;
8202}
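/*
 * Layout computed above: struct io_rings with its CQE array
 * (struct_size(rings, cqes, cq_entries)), aligned to a cache line on
 * SMP, followed by the u32 SQ index array. The SQE array itself is
 * sized separately; see ring_pages() below.
 */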
8203
2b188cc1
JA
8204static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
8205{
75b28aff 8206 size_t pages;
2b188cc1 8207
75b28aff
HV
8208 pages = (size_t)1 << get_order(
8209 rings_size(sq_entries, cq_entries, NULL));
8210 pages += (size_t)1 << get_order(
8211 array_size(sizeof(struct io_uring_sqe), sq_entries));
2b188cc1 8212
75b28aff 8213 return pages;
2b188cc1
JA
8214}
8215
edafccee
JA
8216static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
8217{
8218 int i, j;
8219
8220 if (!ctx->user_bufs)
8221 return -ENXIO;
8222
8223 for (i = 0; i < ctx->nr_user_bufs; i++) {
8224 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8225
8226 for (j = 0; j < imu->nr_bvecs; j++)
f1f6a7dd 8227 unpin_user_page(imu->bvec[j].bv_page);
edafccee 8228
de293938
JA
8229 if (imu->acct_pages)
8230 io_unaccount_mem(ctx, imu->acct_pages, ACCT_PINNED);
d4ef6475 8231 kvfree(imu->bvec);
edafccee
JA
8232 imu->nr_bvecs = 0;
8233 }
8234
8235 kfree(ctx->user_bufs);
8236 ctx->user_bufs = NULL;
8237 ctx->nr_user_bufs = 0;
8238 return 0;
8239}
8240
8241static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8242 void __user *arg, unsigned index)
8243{
8244 struct iovec __user *src;
8245
8246#ifdef CONFIG_COMPAT
8247 if (ctx->compat) {
8248 struct compat_iovec __user *ciovs;
8249 struct compat_iovec ciov;
8250
8251 ciovs = (struct compat_iovec __user *) arg;
8252 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8253 return -EFAULT;
8254
d55e5f5b 8255 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
8256 dst->iov_len = ciov.iov_len;
8257 return 0;
8258 }
8259#endif
8260 src = (struct iovec __user *) arg;
8261 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8262 return -EFAULT;
8263 return 0;
8264}
8265
de293938
JA
8266/*
8267 * Not super efficient, but this only runs at registration time. And we do cache
8268 * the last compound head, so generally we'll only do a full search if we don't
8269 * match that one.
8270 *
8271 * We check if the given compound head page has already been accounted, to
8272 * avoid double accounting it. This allows us to account the full size of the
8273 * page, not just the constituent pages of a huge page.
8274 */
8275static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8276 int nr_pages, struct page *hpage)
8277{
8278 int i, j;
8279
8280 /* check current page array */
8281 for (i = 0; i < nr_pages; i++) {
8282 if (!PageCompound(pages[i]))
8283 continue;
8284 if (compound_head(pages[i]) == hpage)
8285 return true;
8286 }
8287
8288 /* check previously registered pages */
8289 for (i = 0; i < ctx->nr_user_bufs; i++) {
8290 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8291
8292 for (j = 0; j < imu->nr_bvecs; j++) {
8293 if (!PageCompound(imu->bvec[j].bv_page))
8294 continue;
8295 if (compound_head(imu->bvec[j].bv_page) == hpage)
8296 return true;
8297 }
8298 }
8299
8300 return false;
8301}
8302
8303static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8304 int nr_pages, struct io_mapped_ubuf *imu,
8305 struct page **last_hpage)
8306{
8307 int i, ret;
8308
8309 for (i = 0; i < nr_pages; i++) {
8310 if (!PageCompound(pages[i])) {
8311 imu->acct_pages++;
8312 } else {
8313 struct page *hpage;
8314
8315 hpage = compound_head(pages[i]);
8316 if (hpage == *last_hpage)
8317 continue;
8318 *last_hpage = hpage;
8319 if (headpage_already_acct(ctx, pages, i, hpage))
8320 continue;
8321 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8322 }
8323 }
8324
8325 if (!imu->acct_pages)
8326 return 0;
8327
8328 ret = io_account_mem(ctx, imu->acct_pages, ACCT_PINNED);
8329 if (ret)
8330 imu->acct_pages = 0;
8331 return ret;
8332}
8333
edafccee
JA
8334static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
8335 unsigned nr_args)
8336{
8337 struct vm_area_struct **vmas = NULL;
8338 struct page **pages = NULL;
de293938 8339 struct page *last_hpage = NULL;
edafccee
JA
8340 int i, j, got_pages = 0;
8341 int ret = -EINVAL;
8342
8343 if (ctx->user_bufs)
8344 return -EBUSY;
8345 if (!nr_args || nr_args > UIO_MAXIOV)
8346 return -EINVAL;
8347
8348 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
8349 GFP_KERNEL);
8350 if (!ctx->user_bufs)
8351 return -ENOMEM;
8352
8353 for (i = 0; i < nr_args; i++) {
8354 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8355 unsigned long off, start, end, ubuf;
8356 int pret, nr_pages;
8357 struct iovec iov;
8358 size_t size;
8359
8360 ret = io_copy_iov(ctx, &iov, arg, i);
8361 if (ret)
a278682d 8362 goto err;
edafccee
JA
8363
8364 /*
8365 * Don't impose further limits on the size and buffer
8366 * constraints here; we'll return -EINVAL later when IO is
8367 * submitted if they are wrong.
8368 */
8369 ret = -EFAULT;
8370 if (!iov.iov_base || !iov.iov_len)
8371 goto err;
8372
8373 /* arbitrary limit, but we need something */
8374 if (iov.iov_len > SZ_1G)
8375 goto err;
8376
8377 ubuf = (unsigned long) iov.iov_base;
8378 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8379 start = ubuf >> PAGE_SHIFT;
8380 nr_pages = end - start;
8381
edafccee
JA
8382 ret = 0;
8383 if (!pages || nr_pages > got_pages) {
a8c73c1a
DE
8384 kvfree(vmas);
8385 kvfree(pages);
d4ef6475 8386 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
edafccee 8387 GFP_KERNEL);
d4ef6475 8388 vmas = kvmalloc_array(nr_pages,
edafccee
JA
8389 sizeof(struct vm_area_struct *),
8390 GFP_KERNEL);
8391 if (!pages || !vmas) {
8392 ret = -ENOMEM;
edafccee
JA
8393 goto err;
8394 }
8395 got_pages = nr_pages;
8396 }
8397
d4ef6475 8398 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
edafccee
JA
8399 GFP_KERNEL);
8400 ret = -ENOMEM;
de293938 8401 if (!imu->bvec)
edafccee 8402 goto err;
edafccee
JA
8403
8404 ret = 0;
d8ed45c5 8405 mmap_read_lock(current->mm);
2113b05d 8406 pret = pin_user_pages(ubuf, nr_pages,
932f4a63
IW
8407 FOLL_WRITE | FOLL_LONGTERM,
8408 pages, vmas);
edafccee
JA
8409 if (pret == nr_pages) {
8410 /* don't support file backed memory */
8411 for (j = 0; j < nr_pages; j++) {
8412 struct vm_area_struct *vma = vmas[j];
8413
8414 if (vma->vm_file &&
8415 !is_file_hugepages(vma->vm_file)) {
8416 ret = -EOPNOTSUPP;
8417 break;
8418 }
8419 }
8420 } else {
8421 ret = pret < 0 ? pret : -EFAULT;
8422 }
d8ed45c5 8423 mmap_read_unlock(current->mm);
edafccee
JA
8424 if (ret) {
8425 /*
8426 * if we did a partial map, or found file-backed vmas,
8427 * release any pages we did get
8428 */
27c4d3a3 8429 if (pret > 0)
f1f6a7dd 8430 unpin_user_pages(pages, pret);
de293938
JA
8431 kvfree(imu->bvec);
8432 goto err;
8433 }
8434
8435 ret = io_buffer_account_pin(ctx, pages, pret, imu, &last_hpage);
8436 if (ret) {
8437 unpin_user_pages(pages, pret);
d4ef6475 8438 kvfree(imu->bvec);
edafccee
JA
8439 goto err;
8440 }
8441
8442 off = ubuf & ~PAGE_MASK;
8443 size = iov.iov_len;
8444 for (j = 0; j < nr_pages; j++) {
8445 size_t vec_len;
8446
8447 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8448 imu->bvec[j].bv_page = pages[j];
8449 imu->bvec[j].bv_len = vec_len;
8450 imu->bvec[j].bv_offset = off;
8451 off = 0;
8452 size -= vec_len;
8453 }
8454 /* store original address for later verification */
8455 imu->ubuf = ubuf;
8456 imu->len = iov.iov_len;
8457 imu->nr_bvecs = nr_pages;
8458
8459 ctx->nr_user_bufs++;
8460 }
d4ef6475
MR
8461 kvfree(pages);
8462 kvfree(vmas);
edafccee
JA
8463 return 0;
8464err:
d4ef6475
MR
8465 kvfree(pages);
8466 kvfree(vmas);
edafccee
JA
8467 io_sqe_buffer_unregister(ctx);
8468 return ret;
8469}
8470
9b402849
JA
8471static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8472{
8473 __s32 __user *fds = arg;
8474 int fd;
8475
8476 if (ctx->cq_ev_fd)
8477 return -EBUSY;
8478
8479 if (copy_from_user(&fd, fds, sizeof(*fds)))
8480 return -EFAULT;
8481
8482 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8483 if (IS_ERR(ctx->cq_ev_fd)) {
8484 int ret = PTR_ERR(ctx->cq_ev_fd);
8485 ctx->cq_ev_fd = NULL;
8486 return ret;
8487 }
8488
8489 return 0;
8490}
8491
8492static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8493{
8494 if (ctx->cq_ev_fd) {
8495 eventfd_ctx_put(ctx->cq_ev_fd);
8496 ctx->cq_ev_fd = NULL;
8497 return 0;
8498 }
8499
8500 return -ENXIO;
8501}
8502
5a2e745d
JA
8503static int __io_destroy_buffers(int id, void *p, void *data)
8504{
8505 struct io_ring_ctx *ctx = data;
8506 struct io_buffer *buf = p;
8507
067524e9 8508 __io_remove_buffers(ctx, buf, id, -1U);
5a2e745d
JA
8509 return 0;
8510}
8511
8512static void io_destroy_buffers(struct io_ring_ctx *ctx)
8513{
8514 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
8515 idr_destroy(&ctx->io_buffer_idr);
8516}
8517
2b188cc1
JA
8518static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8519{
6b06314c 8520 io_finish_async(ctx);
5dbcad51 8521 io_sqe_buffer_unregister(ctx);
2aede0e4
JA
8522
8523 if (ctx->sqo_task) {
8524 put_task_struct(ctx->sqo_task);
8525 ctx->sqo_task = NULL;
8526 mmdrop(ctx->mm_account);
8527 ctx->mm_account = NULL;
30975825 8528 }
def596e9 8529
91d8f519
DZ
8530#ifdef CONFIG_BLK_CGROUP
8531 if (ctx->sqo_blkcg_css)
8532 css_put(ctx->sqo_blkcg_css);
8533#endif
8534
6b06314c 8535 io_sqe_files_unregister(ctx);
9b402849 8536 io_eventfd_unregister(ctx);
5a2e745d 8537 io_destroy_buffers(ctx);
41726c9a 8538 idr_destroy(&ctx->personality_idr);
def596e9 8539
2b188cc1 8540#if defined(CONFIG_UNIX)
355e8d26
EB
8541 if (ctx->ring_sock) {
8542 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 8543 sock_release(ctx->ring_sock);
355e8d26 8544 }
2b188cc1
JA
8545#endif
8546
75b28aff 8547 io_mem_free(ctx->rings);
2b188cc1 8548 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
8549
8550 percpu_ref_exit(&ctx->refs);
2b188cc1 8551 free_uid(ctx->user);
181e448d 8552 put_cred(ctx->creds);
78076bb6 8553 kfree(ctx->cancel_hash);
0ddf92e8 8554 kmem_cache_free(req_cachep, ctx->fallback_req);
2b188cc1
JA
8555 kfree(ctx);
8556}
8557
8558static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8559{
8560 struct io_ring_ctx *ctx = file->private_data;
8561 __poll_t mask = 0;
8562
8563 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
8564 /*
8565 * synchronizes with barrier from wq_has_sleeper call in
8566 * io_commit_cqring
8567 */
2b188cc1 8568 smp_rmb();
90554200 8569 if (!io_sqring_full(ctx))
2b188cc1 8570 mask |= EPOLLOUT | EPOLLWRNORM;
63e5d81f 8571 if (io_cqring_events(ctx, false))
2b188cc1
JA
8572 mask |= EPOLLIN | EPOLLRDNORM;
8573
8574 return mask;
8575}
8576
8577static int io_uring_fasync(int fd, struct file *file, int on)
8578{
8579 struct io_ring_ctx *ctx = file->private_data;
8580
8581 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8582}
8583
071698e1
JA
8584static int io_remove_personalities(int id, void *p, void *data)
8585{
8586 struct io_ring_ctx *ctx = data;
1e6fa521 8587 struct io_identity *iod;
071698e1 8588
1e6fa521
JA
8589 iod = idr_remove(&ctx->personality_idr, id);
8590 if (iod) {
8591 put_cred(iod->creds);
8592 if (refcount_dec_and_test(&iod->count))
8593 kfree(iod);
8594 }
071698e1
JA
8595 return 0;
8596}
8597
85faa7b8
JA
8598static void io_ring_exit_work(struct work_struct *work)
8599{
b2edc0a7
PB
8600 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
8601 exit_work);
85faa7b8 8602
56952e91
JA
8603 /*
8604 * If we're doing polled IO and end up having requests being
8605 * submitted async (out-of-line), then completions can come in while
8606 * we're waiting for refs to drop. We need to reap these manually,
8607 * as nobody else will be looking for them.
8608 */
b2edc0a7 8609 do {
56952e91 8610 if (ctx->rings)
e6c8aa9a 8611 io_cqring_overflow_flush(ctx, true, NULL, NULL);
b2edc0a7
PB
8612 io_iopoll_try_reap_events(ctx);
8613 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
85faa7b8
JA
8614 io_ring_ctx_free(ctx);
8615}
8616
2b188cc1
JA
8617static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8618{
8619 mutex_lock(&ctx->uring_lock);
8620 percpu_ref_kill(&ctx->refs);
8621 mutex_unlock(&ctx->uring_lock);
8622
f3606e3a
JA
8623 io_kill_timeouts(ctx, NULL);
8624 io_poll_remove_all(ctx, NULL);
561fb04a
JA
8625
8626 if (ctx->io_wq)
8627 io_wq_cancel_all(ctx->io_wq);
8628
15dff286
JA
8629 /* if we failed setting up the ctx, we might not have any rings */
8630 if (ctx->rings)
e6c8aa9a 8631 io_cqring_overflow_flush(ctx, true, NULL, NULL);
b2edc0a7 8632 io_iopoll_try_reap_events(ctx);
071698e1 8633 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
309fc03a
JA
8634
8635 /*
8636 * Do this upfront, so we won't have a grace period where the ring
8637 * is closed but resources aren't reaped yet. This can cause
8638 * spurious failure in setting up a new ring.
8639 */
760618f7
JA
8640 io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries),
8641 ACCT_LOCKED);
309fc03a 8642
85faa7b8 8643 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
fc666777
JA
8644 /*
8645 * Use system_unbound_wq to avoid spawning tons of event kworkers
8646 * if we're exiting a ton of rings at the same time. It just adds
8647 * noise and overhead; there's no discernible change in runtime
8648 * over using system_wq.
8649 */
8650 queue_work(system_unbound_wq, &ctx->exit_work);
2b188cc1
JA
8651}
8652
8653static int io_uring_release(struct inode *inode, struct file *file)
8654{
8655 struct io_ring_ctx *ctx = file->private_data;
8656
8657 file->private_data = NULL;
8658 io_ring_ctx_wait_and_kill(ctx);
8659 return 0;
8660}
8661
67c4d9e6
PB
8662static bool io_wq_files_match(struct io_wq_work *work, void *data)
8663{
8664 struct files_struct *files = data;
8665
dfead8a8 8666 return !files || ((work->flags & IO_WQ_WORK_FILES) &&
98447d65 8667 work->identity->files == files);
67c4d9e6
PB
8668}
8669
f254ac04
JA
8670/*
8671 * Returns true if 'preq' is the link parent of 'req'
8672 */
8673static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
8674{
8675 struct io_kiocb *link;
8676
8677 if (!(preq->flags & REQ_F_LINK_HEAD))
8678 return false;
8679
8680 list_for_each_entry(link, &preq->link_list, link_list) {
8681 if (link == req)
8682 return true;
8683 }
8684
8685 return false;
8686}
8687
8688/*
8689 * We're looking to cancel 'req' because it's holding on to our files, but
8690 * 'req' could be a link to another request. See if it is, and cancel that
8691 * parent request if so.
8692 */
8693static bool io_poll_remove_link(struct io_ring_ctx *ctx, struct io_kiocb *req)
8694{
8695 struct hlist_node *tmp;
8696 struct io_kiocb *preq;
8697 bool found = false;
8698 int i;
8699
8700 spin_lock_irq(&ctx->completion_lock);
8701 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
8702 struct hlist_head *list;
8703
8704 list = &ctx->cancel_hash[i];
8705 hlist_for_each_entry_safe(preq, tmp, list, hash_node) {
8706 found = io_match_link(preq, req);
8707 if (found) {
8708 io_poll_remove_one(preq);
8709 break;
8710 }
8711 }
8712 }
8713 spin_unlock_irq(&ctx->completion_lock);
8714 return found;
8715}
8716
8717static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
8718 struct io_kiocb *req)
8719{
8720 struct io_kiocb *preq;
8721 bool found = false;
8722
8723 spin_lock_irq(&ctx->completion_lock);
8724 list_for_each_entry(preq, &ctx->timeout_list, timeout.list) {
8725 found = io_match_link(preq, req);
8726 if (found) {
8727 __io_timeout_cancel(preq);
8728 break;
8729 }
8730 }
8731 spin_unlock_irq(&ctx->completion_lock);
8732 return found;
8733}
8734
b711d4ea
JA
8735static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
8736{
9a472ef7
PB
8737 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8738 bool ret;
8739
8740 if (req->flags & REQ_F_LINK_TIMEOUT) {
8741 unsigned long flags;
8742 struct io_ring_ctx *ctx = req->ctx;
8743
8744 /* protect against races with linked timeouts */
8745 spin_lock_irqsave(&ctx->completion_lock, flags);
8746 ret = io_match_link(req, data);
8747 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8748 } else {
8749 ret = io_match_link(req, data);
8750 }
8751 return ret;
b711d4ea
JA
8752}
8753
8754static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
8755{
8756 enum io_wq_cancel cret;
8757
8758 /* cancel this particular work, if it's running */
8759 cret = io_wq_cancel_work(ctx->io_wq, &req->work);
8760 if (cret != IO_WQ_CANCEL_NOTFOUND)
8761 return;
8762
8763 /* find links that hold this pending, cancel those */
8764 cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true);
8765 if (cret != IO_WQ_CANCEL_NOTFOUND)
8766 return;
8767
8768 /* if we have a poll link holding this pending, cancel that */
8769 if (io_poll_remove_link(ctx, req))
8770 return;
8771
8772 /* final option, timeout link is holding this req pending */
8773 io_timeout_remove_link(ctx, req);
8774}
8775
b7ddce3c 8776static void io_cancel_defer_files(struct io_ring_ctx *ctx,
ef9865a4 8777 struct task_struct *task,
b7ddce3c
PB
8778 struct files_struct *files)
8779{
8780 struct io_defer_entry *de = NULL;
8781 LIST_HEAD(list);
8782
8783 spin_lock_irq(&ctx->completion_lock);
8784 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
ef9865a4
PB
8785 if (io_task_match(de->req, task) &&
8786 io_match_files(de->req, files)) {
b7ddce3c
PB
8787 list_cut_position(&list, &ctx->defer_list, &de->list);
8788 break;
8789 }
8790 }
8791 spin_unlock_irq(&ctx->completion_lock);
8792
8793 while (!list_empty(&list)) {
8794 de = list_first_entry(&list, struct io_defer_entry, list);
8795 list_del_init(&de->list);
8796 req_set_fail_links(de->req);
8797 io_put_req(de->req);
8798 io_req_complete(de->req, -ECANCELED);
8799 kfree(de);
8800 }
8801}
8802
76e1b642
JA
8803/*
8804 * Returns true if we found and killed one or more files pinning requests
8805 */
8806static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
fcb323cc
JA
8807 struct files_struct *files)
8808{
67c4d9e6 8809 if (list_empty_careful(&ctx->inflight_list))
76e1b642 8810 return false;
67c4d9e6
PB
8811
8812 /* cancel all at once, should be faster than doing it one by one */
8813 io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
8814
fcb323cc 8815 while (!list_empty_careful(&ctx->inflight_list)) {
d8f1b971
XW
8816 struct io_kiocb *cancel_req = NULL, *req;
8817 DEFINE_WAIT(wait);
fcb323cc
JA
8818
8819 spin_lock_irq(&ctx->inflight_lock);
8820 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
dfead8a8 8821 if (files && (req->work.flags & IO_WQ_WORK_FILES) &&
98447d65 8822 req->work.identity->files != files)
768134d4
JA
8823 continue;
8824 /* req is being completed, ignore */
8825 if (!refcount_inc_not_zero(&req->refs))
8826 continue;
8827 cancel_req = req;
8828 break;
fcb323cc 8829 }
768134d4 8830 if (cancel_req)
fcb323cc 8831 prepare_to_wait(&ctx->inflight_wait, &wait,
768134d4 8832 TASK_UNINTERRUPTIBLE);
fcb323cc
JA
8833 spin_unlock_irq(&ctx->inflight_lock);
8834
768134d4
JA
8835 /* We need to keep going until we don't find a matching req */
8836 if (!cancel_req)
fcb323cc 8837 break;
bb175342
PB
8838 /* cancel this request, or head link requests */
8839 io_attempt_cancel(ctx, cancel_req);
8840 io_put_req(cancel_req);
6200b0ae
JA
8841 /* cancellations _may_ trigger task work */
8842 io_run_task_work();
fcb323cc 8843 schedule();
d8f1b971 8844 finish_wait(&ctx->inflight_wait, &wait);
fcb323cc 8845 }
76e1b642
JA
8846
8847 return true;
fcb323cc
JA
8848}
8849
801dd57b 8850static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
44e728b8 8851{
801dd57b
PB
8852 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8853 struct task_struct *task = data;
44e728b8 8854
f3606e3a 8855 return io_task_match(req, task);
44e728b8
PB
8856}
8857
0f212204
JA
8858static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
8859 struct task_struct *task,
8860 struct files_struct *files)
8861{
8862 bool ret;
8863
8864 ret = io_uring_cancel_files(ctx, files);
8865 if (!files) {
8866 enum io_wq_cancel cret;
8867
8868 cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, task, true);
8869 if (cret != IO_WQ_CANCEL_NOTFOUND)
8870 ret = true;
8871
8872 /* SQPOLL thread does its own polling */
8873 if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
8874 while (!list_empty_careful(&ctx->iopoll_list)) {
8875 io_iopoll_try_reap_events(ctx);
8876 ret = true;
8877 }
8878 }
8879
8880 ret |= io_poll_remove_all(ctx, task);
8881 ret |= io_kill_timeouts(ctx, task);
8882 }
8883
8884 return ret;
8885}
8886
8887/*
8888 * We need to iteratively cancel requests, in case a request has dependent
8889 * hard links. These persist even when cancelation fails, hence keep
8890 * looping until none are found.
8891 */
8892static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
8893 struct files_struct *files)
8894{
8895 struct task_struct *task = current;
8896
fdaf083c 8897 if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
534ca6d6 8898 task = ctx->sq_data->thread;
fdaf083c
JA
8899 atomic_inc(&task->io_uring->in_idle);
8900 io_sq_thread_park(ctx->sq_data);
8901 }
0f212204 8902
ef9865a4
PB
8903 if (files)
8904 io_cancel_defer_files(ctx, NULL, files);
8905 else
8906 io_cancel_defer_files(ctx, task, NULL);
8907
0f212204
JA
8908 io_cqring_overflow_flush(ctx, true, task, files);
8909
8910 while (__io_uring_cancel_task_requests(ctx, task, files)) {
8911 io_run_task_work();
8912 cond_resched();
8913 }
fdaf083c
JA
8914
8915 if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
8916 atomic_dec(&task->io_uring->in_idle);
8917 /*
8918 * If the files that are going away are the ones in the thread
8919 * identity, clear them out.
8920 */
8921 if (task->io_uring->identity->files == files)
8922 task->io_uring->identity->files = NULL;
8923 io_sq_thread_unpark(ctx->sq_data);
8924 }
0f212204
JA
8925}

/*
 * Note that this task has used io_uring. We use it for cancelation purposes.
 */
static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
{
	struct io_uring_task *tctx = current->io_uring;

	if (unlikely(!tctx)) {
		int ret;

		ret = io_uring_alloc_task_context(current);
		if (unlikely(ret))
			return ret;
		tctx = current->io_uring;
	}
	if (tctx->last != file) {
		void *old = xa_load(&tctx->xa, (unsigned long)file);

		if (!old) {
			get_file(file);
			xa_store(&tctx->xa, (unsigned long)file, file, GFP_KERNEL);
		}
		tctx->last = file;
	}

	/*
	 * This is race safe in that the task itself is doing this, hence it
	 * cannot be going through the exit/cancel paths at the same time.
	 * This cannot be modified while exit/cancel is running.
	 */
	if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL))
		tctx->sqpoll = true;

	return 0;
}

/*
 * Remove this io_uring_file -> task mapping.
 */
static void io_uring_del_task_file(struct file *file)
{
	struct io_uring_task *tctx = current->io_uring;

	if (tctx->last == file)
		tctx->last = NULL;
	file = xa_erase(&tctx->xa, (unsigned long)file);
	if (file)
		fput(file);
}

/*
 * Drop the task note for this file if we're the only ones that hold it after
 * a pending fput().
 */
static void io_uring_attempt_task_drop(struct file *file)
{
	if (!current->io_uring)
		return;
	/*
	 * fput() is pending, so the count will be 2 if the only other ref is
	 * our potential task file note. If the task is exiting, drop it
	 * regardless of the count.
	 */
	if (fatal_signal_pending(current) || (current->flags & PF_EXITING) ||
	    atomic_long_read(&file->f_count) == 2)
		io_uring_del_task_file(file);
}

void __io_uring_files_cancel(struct files_struct *files)
{
	struct io_uring_task *tctx = current->io_uring;
	struct file *file;
	unsigned long index;

	/* make sure overflow events are dropped */
	atomic_inc(&tctx->in_idle);

	xa_for_each(&tctx->xa, index, file) {
		struct io_ring_ctx *ctx = file->private_data;

		io_uring_cancel_task_requests(ctx, files);
		if (files)
			io_uring_del_task_file(file);
	}

	atomic_dec(&tctx->in_idle);
}

static s64 tctx_inflight(struct io_uring_task *tctx)
{
	unsigned long index;
	struct file *file;
	s64 inflight;

	inflight = percpu_counter_sum(&tctx->inflight);
	if (!tctx->sqpoll)
		return inflight;

	/*
	 * If we have SQPOLL rings, then we need to iterate and find them, and
	 * add the pending count for those.
	 */
	xa_for_each(&tctx->xa, index, file) {
		struct io_ring_ctx *ctx = file->private_data;

		if (ctx->flags & IORING_SETUP_SQPOLL) {
			struct io_uring_task *__tctx = ctx->sqo_task->io_uring;

			inflight += percpu_counter_sum(&__tctx->inflight);
		}
	}

	return inflight;
}

/*
 * Find any io_uring fd that this task has registered or done IO on, and cancel
 * requests.
 */
void __io_uring_task_cancel(void)
{
	struct io_uring_task *tctx = current->io_uring;
	DEFINE_WAIT(wait);
	s64 inflight;

	/* make sure overflow events are dropped */
	atomic_inc(&tctx->in_idle);

	do {
		/* read completions before cancelations */
		inflight = tctx_inflight(tctx);
		if (!inflight)
			break;
		__io_uring_files_cancel(NULL);

		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);

		/*
		 * If we've seen completions, retry. This avoids a race where
		 * a completion comes in before we did prepare_to_wait().
		 */
		if (inflight != tctx_inflight(tctx))
			continue;
		schedule();
	} while (1);

	finish_wait(&tctx->wait, &wait);
	atomic_dec(&tctx->in_idle);
}

static int io_uring_flush(struct file *file, void *data)
{
	io_uring_attempt_task_drop(file);
	return 0;
}

static void *io_uring_validate_mmap_request(struct file *file,
					    loff_t pgoff, size_t sz)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t offset = pgoff << PAGE_SHIFT;
	struct page *page;
	void *ptr;

	switch (offset) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		ptr = ctx->rings;
		break;
	case IORING_OFF_SQES:
		ptr = ctx->sq_sqes;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	page = virt_to_head_page(ptr);
	if (sz > page_size(page))
		return ERR_PTR(-EINVAL);

	return ptr;
}

#ifdef CONFIG_MMU

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t sz = vma->vm_end - vma->vm_start;
	unsigned long pfn;
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}

#else /* !CONFIG_MMU */

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
}

static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len,
	unsigned long pgoff, unsigned long flags)
{
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, pgoff, len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return (unsigned long) ptr;
}

#endif /* !CONFIG_MMU */
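
/*
 * For illustration only, not kernel code: a minimal userspace sketch of how
 * the offsets validated above are consumed, assuming a ring fd returned by
 * io_uring_setup() and the offsets it filled into struct io_uring_params p:
 *
 *	sq_ring = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		       ring_fd, IORING_OFF_SQ_RING);
 *	sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		    ring_fd, IORING_OFF_SQES);
 *
 * The CQ ring is mapped the same way with IORING_OFF_CQ_RING; with
 * IORING_FEAT_SINGLE_MMAP the SQ and CQ rings share a single mapping.
 */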

static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(wait);

	do {
		if (!io_sqring_full(ctx))
			break;

		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		if (!io_sqring_full(ctx))
			break;

		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
}
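
/*
 * For illustration only, not kernel code: with IORING_SETUP_SQPOLL, an
 * application that finds the SQ ring full can block here via io_uring_enter()
 * until the poller frees up space, instead of spinning:
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *		IORING_ENTER_SQ_WAIT, NULL, 0);
 */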

SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
		u32, min_complete, u32, flags, const sigset_t __user *, sig,
		size_t, sigsz)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	int submitted = 0;
	struct fd f;

	io_run_task_work();

	if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
			IORING_ENTER_SQ_WAIT))
		return -EINVAL;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ret = -ENXIO;
	ctx = f.file->private_data;
	if (!percpu_ref_tryget(&ctx->refs))
		goto out_fput;

	ret = -EBADFD;
	if (ctx->flags & IORING_SETUP_R_DISABLED)
		goto out;

	/*
	 * For SQ polling, the thread will do all submissions and completions.
	 * Just return the requested submit count, and wake the thread if
	 * we were asked to.
	 */
	ret = 0;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (!list_empty_careful(&ctx->cq_overflow_list))
			io_cqring_overflow_flush(ctx, false, NULL, NULL);
		if (flags & IORING_ENTER_SQ_WAKEUP)
			wake_up(&ctx->sq_data->wait);
		if (flags & IORING_ENTER_SQ_WAIT)
			io_sqpoll_wait_sq(ctx);
		submitted = to_submit;
	} else if (to_submit) {
		ret = io_uring_add_task_file(ctx, f.file);
		if (unlikely(ret))
			goto out;
		mutex_lock(&ctx->uring_lock);
		submitted = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (submitted != to_submit)
			goto out;
	}
	if (flags & IORING_ENTER_GETEVENTS) {
		min_complete = min(min_complete, ctx->cq_entries);

		/*
		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
		 * space applications don't need to do io completion events
		 * polling again, they can rely on io_sq_thread to do polling
		 * work, which can reduce cpu usage and uring_lock contention.
		 */
		if (ctx->flags & IORING_SETUP_IOPOLL &&
		    !(ctx->flags & IORING_SETUP_SQPOLL)) {
			ret = io_iopoll_check(ctx, min_complete);
		} else {
			ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
		}
	}

out:
	percpu_ref_put(&ctx->refs);
out_fput:
	fdput(f);
	return submitted ? submitted : ret;
}
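
/*
 * For illustration only, not kernel code: a typical blocking submit-and-wait
 * from userspace (liburing wraps this), assuming a non-SQPOLL ring:
 *
 *	ret = syscall(__NR_io_uring_enter, ring_fd, n_sqes, 1,
 *		      IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * On success ret is the number of SQEs consumed, and the call has waited for
 * at least one completion to arrive in the CQ ring.
 */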

#ifdef CONFIG_PROC_FS
static int io_uring_show_cred(int id, void *p, void *data)
{
	struct io_identity *iod = p;
	const struct cred *cred = iod->creds;
	struct seq_file *m = data;
	struct user_namespace *uns = seq_user_ns(m);
	struct group_info *gi;
	kernel_cap_t cap;
	unsigned __capi;
	int g;

	seq_printf(m, "%5d\n", id);
	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
	seq_puts(m, "\n\tGroups:\t");
	gi = cred->group_info;
	for (g = 0; g < gi->ngroups; g++) {
		seq_put_decimal_ull(m, g ? " " : "",
				    from_kgid_munged(uns, gi->gid[g]));
	}
	seq_puts(m, "\n\tCapEff:\t");
	cap = cred->cap_effective;
	CAP_FOR_EACH_U32(__capi)
		seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
	seq_putc(m, '\n');
	return 0;
}

static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
	struct io_sq_data *sq = NULL;
	bool has_lock;
	int i;

	/*
	 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
	 * since fdinfo case grabs it in the opposite direction of normal use
	 * cases. If we fail to get the lock, we just don't iterate any
	 * structures that could be going away outside the io_uring mutex.
	 */
	has_lock = mutex_trylock(&ctx->uring_lock);

	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL))
		sq = ctx->sq_data;

	seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
	seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
	for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
		struct fixed_file_table *table;
		struct file *f;

		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
		f = table->files[i & IORING_FILE_TABLE_MASK];
		if (f)
			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
	for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *buf = &ctx->user_bufs[i];

		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
						(unsigned int) buf->len);
	}
	if (has_lock && !idr_is_empty(&ctx->personality_idr)) {
		seq_printf(m, "Personalities:\n");
		idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
	}
	seq_printf(m, "PollList:\n");
	spin_lock_irq(&ctx->completion_lock);
	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
		struct hlist_head *list = &ctx->cancel_hash[i];
		struct io_kiocb *req;

		hlist_for_each_entry(req, list, hash_node)
			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
					req->task->task_works != NULL);
	}
	spin_unlock_irq(&ctx->completion_lock);
	if (has_lock)
		mutex_unlock(&ctx->uring_lock);
}

static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct io_ring_ctx *ctx = f->private_data;

	if (percpu_ref_tryget(&ctx->refs)) {
		__io_uring_show_fdinfo(ctx, m);
		percpu_ref_put(&ctx->refs);
	}
}
#endif

static const struct file_operations io_uring_fops = {
	.release	= io_uring_release,
	.flush		= io_uring_flush,
	.mmap		= io_uring_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = io_uring_nommu_get_unmapped_area,
	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
	.poll		= io_uring_poll,
	.fasync		= io_uring_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= io_uring_show_fdinfo,
#endif
};

static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
				  struct io_uring_params *p)
{
	struct io_rings *rings;
	size_t size, sq_array_offset;

	/* make sure these are sane, as we already accounted them */
	ctx->sq_entries = p->sq_entries;
	ctx->cq_entries = p->cq_entries;

	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	rings = io_mem_alloc(size);
	if (!rings)
		return -ENOMEM;

	ctx->rings = rings;
	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
	rings->sq_ring_mask = p->sq_entries - 1;
	rings->cq_ring_mask = p->cq_entries - 1;
	rings->sq_ring_entries = p->sq_entries;
	rings->cq_ring_entries = p->cq_entries;
	ctx->sq_mask = rings->sq_ring_mask;
	ctx->cq_mask = rings->cq_ring_mask;

	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
	if (size == SIZE_MAX) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -EOVERFLOW;
	}

	ctx->sq_sqes = io_mem_alloc(size);
	if (!ctx->sq_sqes) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -ENOMEM;
	}

	return 0;
}

/*
 * Allocate an anonymous fd: this is what constitutes the application
 * visible backing of an io_uring instance. The application mmaps this
 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
 * we have to tie this fd to a socket for file garbage collection purposes.
 */
static int io_uring_get_fd(struct io_ring_ctx *ctx)
{
	struct file *file;
	int ret;

#if defined(CONFIG_UNIX)
	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
				&ctx->ring_sock);
	if (ret)
		return ret;
#endif

	ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (ret < 0)
		goto err;

	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
					O_RDWR | O_CLOEXEC);
	if (IS_ERR(file)) {
err_fd:
		put_unused_fd(ret);
		ret = PTR_ERR(file);
		goto err;
	}

#if defined(CONFIG_UNIX)
	ctx->ring_sock->file = file;
#endif
	if (unlikely(io_uring_add_task_file(ctx, file))) {
		file = ERR_PTR(-ENOMEM);
		goto err_fd;
	}
	fd_install(ret, file);
	return ret;
err:
#if defined(CONFIG_UNIX)
	sock_release(ctx->ring_sock);
	ctx->ring_sock = NULL;
#endif
	return ret;
}

static int io_uring_create(unsigned entries, struct io_uring_params *p,
			   struct io_uring_params __user *params)
{
	struct user_struct *user = NULL;
	struct io_ring_ctx *ctx;
	bool limit_mem;
	int ret;

	if (!entries)
		return -EINVAL;
	if (entries > IORING_MAX_ENTRIES) {
		if (!(p->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		entries = IORING_MAX_ENTRIES;
	}

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. We do NOT impose
		 * any cq vs sq ring sizing.
		 */
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
		if (p->cq_entries < p->sq_entries)
			return -EINVAL;
		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
			if (!(p->flags & IORING_SETUP_CLAMP))
				return -EINVAL;
			p->cq_entries = IORING_MAX_CQ_ENTRIES;
		}
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}
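	/*
	 * Worked example of the sizing above: entries == 100 without
	 * IORING_SETUP_CQSIZE yields sq_entries == 128 (rounded up to a
	 * power of two) and cq_entries == 256.
	 */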

	user = get_uid(current_user());
	limit_mem = !capable(CAP_IPC_LOCK);

	if (limit_mem) {
		ret = __io_account_mem(user,
				ring_pages(p->sq_entries, p->cq_entries));
		if (ret) {
			free_uid(user);
			return ret;
		}
	}

	ctx = io_ring_ctx_alloc(p);
	if (!ctx) {
		if (limit_mem)
			__io_unaccount_mem(user, ring_pages(p->sq_entries,
								p->cq_entries));
		free_uid(user);
		return -ENOMEM;
	}
	ctx->compat = in_compat_syscall();
	ctx->user = user;
	ctx->creds = get_current_cred();
#ifdef CONFIG_AUDIT
	ctx->loginuid = current->loginuid;
	ctx->sessionid = current->sessionid;
#endif
	ctx->sqo_task = get_task_struct(current);

	/*
	 * This is just grabbed for accounting purposes. When a process exits,
	 * the mm is exited and dropped before the files, hence we need to hang
	 * on to this mm purely for the purposes of being able to unaccount
	 * memory (locked/pinned vm). It's not used for anything else.
	 */
	mmgrab(current->mm);
	ctx->mm_account = current->mm;

#ifdef CONFIG_BLK_CGROUP
	/*
	 * The sq thread will belong to the original cgroup it was inited in.
	 * If the cgroup goes offline (e.g. disabling the io controller), then
	 * issued bios will be associated with the closest cgroup later in the
	 * block layer.
	 */
	rcu_read_lock();
	ctx->sqo_blkcg_css = blkcg_css();
	ret = css_tryget_online(ctx->sqo_blkcg_css);
	rcu_read_unlock();
	if (!ret) {
		/* don't init against a dying cgroup, have the user try again */
		ctx->sqo_blkcg_css = NULL;
		ret = -ENODEV;
		goto err;
	}
#endif

	/*
	 * Account memory _before_ installing the file descriptor. Once
	 * the descriptor is installed, it can get closed at any time. Also
	 * do this before hitting the general error path, as ring freeing
	 * will un-account as well.
	 */
	io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
		       ACCT_LOCKED);
	ctx->limit_mem = limit_mem;

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_create(ctx, p);
	if (ret)
		goto err;

	if (!(p->flags & IORING_SETUP_R_DISABLED))
		io_sq_offload_start(ctx);

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);
	p->cq_off.flags = offsetof(struct io_rings, cq_flags);

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED;

	if (copy_to_user(params, p, sizeof(*p))) {
		ret = -EFAULT;
		goto err;
	}

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	ret = io_uring_get_fd(ctx);
	if (ret < 0)
		goto err;

	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}

/*
 * Sets up an aio uring context, and returns the fd. The application asks for
 * a ring size; we return the actual sq/cq ring sizes (among other things) in
 * the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
			IORING_SETUP_R_DISABLED))
		return -EINVAL;

	return io_uring_create(entries, &p, params);
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}
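
/*
 * For illustration only, not kernel code: minimal userspace setup via the
 * raw syscall (liburing's io_uring_queue_init() wraps this):
 *
 *	struct io_uring_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	ring_fd = syscall(__NR_io_uring_setup, 8, &p);
 *
 * On return, p.sq_entries/p.cq_entries hold the actual ring sizes, and
 * p.sq_off/p.cq_off the offsets that io_uring_create() filled in for mmap.
 */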

static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_op_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}
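
/*
 * For illustration only, not kernel code: userspace probes supported opcodes
 * through io_uring_register() with a zeroed buffer:
 *
 *	struct io_uring_probe *probe;
 *
 *	probe = calloc(1, sizeof(*probe) +
 *			  IORING_OP_LAST * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, IORING_OP_LAST);
 *
 * Each probe->ops[i].flags can then be tested for IO_URING_OP_SUPPORTED.
 */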

static int io_register_personality(struct io_ring_ctx *ctx)
{
	struct io_identity *id;
	int ret;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (unlikely(!id))
		return -ENOMEM;

	io_init_identity(id);
	id->creds = get_current_cred();

	ret = idr_alloc_cyclic(&ctx->personality_idr, id, 1, USHRT_MAX, GFP_KERNEL);
	if (ret < 0) {
		put_cred(id->creds);
		kfree(id);
	}
	return ret;
}
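
/*
 * For illustration only, not kernel code: userspace registers its current
 * credentials and gets back a personality id it can attach to SQEs:
 *
 *	id = syscall(__NR_io_uring_register, ring_fd,
 *		     IORING_REGISTER_PERSONALITY, NULL, 0);
 *	...
 *	sqe->personality = id;	// issue this request with those creds
 *
 * IORING_UNREGISTER_PERSONALITY with nr_args == id releases it again.
 */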

static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
	struct io_identity *iod;

	iod = idr_remove(&ctx->personality_idr, id);
	if (iod) {
		put_cred(iod->creds);
		if (refcount_dec_and_test(&iod->count))
			kfree(iod);
		return 0;
	}

	return -EINVAL;
}

static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
				    unsigned int nr_args)
{
	struct io_uring_restriction *res;
	size_t size;
	int i, ret;

	/* Restrictions allowed only if rings started disabled */
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	/* We allow only a single restrictions registration */
	if (ctx->restrictions.registered)
		return -EBUSY;

	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
		return -EINVAL;

	size = array_size(nr_args, sizeof(*res));
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	res = memdup_user(arg, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = 0;

	for (i = 0; i < nr_args; i++) {
		switch (res[i].opcode) {
		case IORING_RESTRICTION_REGISTER_OP:
			if (res[i].register_op >= IORING_REGISTER_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].register_op,
				  ctx->restrictions.register_op);
			break;
		case IORING_RESTRICTION_SQE_OP:
			if (res[i].sqe_op >= IORING_OP_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
			break;
		case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
			ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
			break;
		case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
			ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

out:
	/* Reset all restrictions if an error happened */
	if (ret != 0)
		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
	else
		ctx->restrictions.registered = true;

	kfree(res);
	return ret;
}
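
/*
 * For illustration only, not kernel code: a ring created with
 * IORING_SETUP_R_DISABLED can be restricted to a whitelist before being
 * enabled, e.g. to allow only fixed-file reads and writes:
 *
 *	struct io_uring_restriction res[2] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_READ_FIXED },
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_WRITE_FIXED },
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */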

static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	if (ctx->restrictions.registered)
		ctx->restricted = 1;

	ctx->flags &= ~IORING_SETUP_R_DISABLED;

	io_sq_offload_start(ctx);

	return 0;
}

static bool io_register_op_must_quiesce(int op)
{
	switch (op) {
	case IORING_UNREGISTER_FILES:
	case IORING_REGISTER_FILES_UPDATE:
	case IORING_REGISTER_PROBE:
	case IORING_REGISTER_PERSONALITY:
	case IORING_UNREGISTER_PERSONALITY:
		return false;
	default:
		return true;
	}
}

static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex, if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (io_register_op_must_quiesce(opcode)) {
		percpu_ref_kill(&ctx->refs);

		/*
		 * Drop uring mutex before waiting for references to exit. If
		 * another thread is currently inside io_uring_enter() it might
		 * need to grab the uring_lock to make progress. If we hold it
		 * here across the drain wait, then we can deadlock. It's safe
		 * to drop the mutex here, since no new references will come in
		 * after we've killed the percpu ref.
		 */
		mutex_unlock(&ctx->uring_lock);
		do {
			ret = wait_for_completion_interruptible(&ctx->ref_comp);
			if (!ret)
				break;
			ret = io_run_task_work_sig();
			if (ret < 0)
				break;
		} while (1);

		mutex_lock(&ctx->uring_lock);

		if (ret) {
			percpu_ref_resurrect(&ctx->refs);
			goto out_quiesce;
		}
	}

	if (ctx->restricted) {
		if (opcode >= IORING_REGISTER_LAST) {
			ret = -EINVAL;
			goto out;
		}

		if (!test_bit(opcode, ctx->restrictions.register_op)) {
			ret = -EACCES;
			goto out;
		}
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffer_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffer_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_sqe_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
out_quiesce:
		reinit_completion(&ctx->ref_comp);
	}
	return ret;
}

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
							ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}

static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
	return 0;
};
__initcall(io_uring_init);
10041__initcall(io_uring_init);