io_uring: extract a helper for ctx quiesce
[linux-block.git] / fs / io_uring.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
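
/*
 * Illustrative sketch, not part of this file: the userspace side of the CQ
 * ordering described above, roughly what liburing does. The names cq_khead,
 * cq_ktail, cq_mask and cqes stand for the mmap'ed ring fields and are
 * hypothetical here.
 *
 *	unsigned head = *cq_khead;
 *	// acquire pairs with the kernel's smp_store_release() of cq.tail
 *	unsigned tail = smp_load_acquire(cq_ktail);
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & cq_mask];
 *		consume(cqe);		// read the entry ...
 *		head++;
 *	}
 *	// ... before publishing the new head back to the kernel
 *	smp_store_release(cq_khead, head);
 */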
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/tracehook.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

/* 512 entries per page on 64-bit archs, 64 pages max */
#define IORING_MAX_FIXED_FILES	(1U << 15)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define IO_RSRC_TAG_TABLE_SHIFT	9
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

#define IORING_MAX_REG_BUFFERS	(1U << 14)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
				IOSQE_BUFFER_SELECT)
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring	sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32	sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32	sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32	sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32	sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32	cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32	cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
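
/*
 * Illustrative sketch, not part of this file: how an application typically
 * maps the regions described above, per io_uring_setup(2). The names ring_fd
 * and p are hypothetical.
 *
 *	struct io_uring_params p = { 0 };
 *	int ring_fd = io_uring_setup(entries, &p);
 *
 *	void *sq = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			ring_fd, IORING_OFF_SQ_RING);
 *	void *cq = mmap(NULL, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
 *			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			ring_fd, IORING_OFF_CQ_RING);
 *
 * The io_sqring_offsets/io_cqring_offsets returned in p then locate head,
 * tail, the masks and the flags words inside those mappings.
 */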

enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[];
};

struct io_ring_ctx;

struct io_overflow_cqe {
	struct io_uring_cqe cqe;
	struct list_head list;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_rsrc_put {
	struct list_head list;
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

struct io_file_table {
	struct io_fixed_file *files;
};

struct io_rsrc_node {
	struct percpu_ref	refs;
	struct list_head	node;
	struct list_head	rsrc_list;
	struct io_rsrc_data	*rsrc_data;
	struct llist_node	llist;
	bool			done;
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx	*ctx;

	u64			**tags;
	unsigned int		nr;
	rsrc_put_fn		*do_put;
	atomic_t		refs;
	struct completion	done;
	bool			quiesce;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t		refs;
	atomic_t		park_pending;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
	int			sq_cpu;
	pid_t			task_pid;
	pid_t			task_tgid;

	unsigned long		state;
	struct completion	exited;
};

#define IO_IOPOLL_BATCH		8
#define IO_COMPL_BATCH		32
#define IO_REQ_CACHE_SIZE	32
#define IO_REQ_ALLOC_BATCH	8

struct io_comp_state {
	struct io_kiocb		*reqs[IO_COMPL_BATCH];
	unsigned int		nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head	free_list;
};

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	struct blk_plug		plug;
	struct io_submit_link	link;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_REQ_CACHE_SIZE];
	unsigned int		free_reqs;

	bool			plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_comp_state	comp;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		file_refs;
	unsigned int		ios_left;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref	refs;

		struct io_rings		*rings;
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		struct list_head	defer_list;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;
		struct xarray		io_buffers;
		struct xarray		personalities;
		u32			pers_next;
		unsigned		sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct list_head	locked_free_list;
	unsigned int		locked_free_nr;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned long		check_cq_overflow;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct eventfd_ctx	*cq_ev_fd;
		struct wait_queue_head	poll_wait;
		struct wait_queue_head	cq_wait;
		unsigned		cq_extra;
		atomic_t		cq_timeouts;
		struct fasync_struct	*cq_fasync;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_queue;
	} ____cacheline_aligned_in_smp;

	struct io_restriction		restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct {
		struct io_rsrc_node	*rsrc_backup_node;
		struct io_mapped_ubuf	*dummy_ubuf;
		struct io_rsrc_data	*file_data;
		struct io_rsrc_data	*buf_data;

		struct delayed_work	rsrc_put_work;
		struct llist_head	rsrc_put_llist;
		struct list_head	rsrc_ref_list;
		spinlock_t		rsrc_ref_lock;
	};

	/* Keep this last, we don't need it for the fast path */
	struct {
		#if defined(CONFIG_UNIX)
			struct socket	*ring_sock;
		#endif
		/* hashed buffered write serialization */
		struct io_wq_hash	*hash_map;

		/* Only used for accounting purposes */
		struct user_struct	*user;
		struct mm_struct	*mm_account;

		/* ctx exit and cancelation */
		struct llist_head	fallback_llist;
		struct delayed_work	fallback_work;
		struct work_struct	exit_work;
		struct list_head	tctx_list;
		struct completion	ref_comp;
	};
};

struct io_uring_task {
	/* submission side */
	int			cached_refs;
	struct xarray		xa;
	struct wait_queue_head	wait;
	const struct io_ring_ctx *last;
	struct io_wq		*io_wq;
	struct percpu_counter	inflight;
	atomic_t		inflight_tracked;
	atomic_t		in_idle;

	spinlock_t		task_lock;
	struct io_wq_work_list	task_list;
	unsigned long		task_state;
	struct callback_head	task_work;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_close {
	struct file			*file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
};

554 u64 addr;
9c8e11b3
PB
555
556 /* timeout update */
557 struct timespec64 ts;
558 u32 flags;
0bdf7a2d
PB
559};
560
9adbd45d
JA
561struct io_rw {
562 /* NOTE: kiocb has the file as the first member, so don't do it here */
563 struct kiocb kiocb;
564 u64 addr;
565 u64 len;
566};
567
3fbb51c1
JA
568struct io_connect {
569 struct file *file;
570 struct sockaddr __user *addr;
571 int addr_len;
572};
573
e47293fd
JA
574struct io_sr_msg {
575 struct file *file;
fddaface 576 union {
4af3417a
PB
577 struct compat_msghdr __user *umsg_compat;
578 struct user_msghdr __user *umsg;
579 void __user *buf;
fddaface 580 };
e47293fd 581 int msg_flags;
bcda7baa 582 int bgid;
fddaface 583 size_t len;
bcda7baa 584 struct io_buffer *kbuf;
e47293fd
JA
585};
586
15b71abe
JA
587struct io_open {
588 struct file *file;
589 int dfd;
15b71abe 590 struct filename *filename;
c12cedf2 591 struct open_how how;
4022e7af 592 unsigned long nofile;
15b71abe
JA
593};
594
269bbe5f 595struct io_rsrc_update {
05f3fb3c
JA
596 struct file *file;
597 u64 arg;
598 u32 nr_args;
599 u32 offset;
600};
601
4840e418
JA
602struct io_fadvise {
603 struct file *file;
604 u64 offset;
605 u32 len;
606 u32 advice;
607};
608
c1ca757b
JA
609struct io_madvise {
610 struct file *file;
611 u64 addr;
612 u32 len;
613 u32 advice;
614};
615
3e4827b0
JA
616struct io_epoll {
617 struct file *file;
618 int epfd;
619 int op;
620 int fd;
621 struct epoll_event event;
e47293fd
JA
622};
623
7d67af2c
PB
624struct io_splice {
625 struct file *file_out;
626 struct file *file_in;
627 loff_t off_out;
628 loff_t off_in;
629 u64 len;
630 unsigned int flags;
631};
632
ddf0322d
JA
633struct io_provide_buf {
634 struct file *file;
635 __u64 addr;
38134ada 636 __u32 len;
ddf0322d
JA
637 __u32 bgid;
638 __u16 nbufs;
639 __u16 bid;
640};
641
1d9e1288
BM
642struct io_statx {
643 struct file *file;
644 int dfd;
645 unsigned int mask;
646 unsigned int flags;
e62753e4 647 const char __user *filename;
1d9e1288
BM
648 struct statx __user *buffer;
649};
650
36f4fa68
JA
651struct io_shutdown {
652 struct file *file;
653 int how;
654};
655
80a261fd
JA
656struct io_rename {
657 struct file *file;
658 int old_dfd;
659 int new_dfd;
660 struct filename *oldpath;
661 struct filename *newpath;
662 int flags;
663};
664
14a1143b
JA
665struct io_unlink {
666 struct file *file;
667 int dfd;
668 int flags;
669 struct filename *filename;
670};
671
3ca405eb
PB
672struct io_completion {
673 struct file *file;
674 struct list_head list;
8c3f9cd1 675 u32 cflags;
3ca405eb
PB
676};
677
struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_LTIMEOUT_ACTIVE_BIT,
	REQ_F_COMPLETE_INLINE_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_DONT_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_NOWAIT_READ_BIT,
	REQ_F_NOWAIT_WRITE_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* don't attempt request reissue, see io_rw_reissue() */
	REQ_F_DONT_REISSUE	= BIT(REQ_F_DONT_REISSUE_BIT),
	/* supports async reads */
	REQ_F_NOWAIT_READ	= BIT(REQ_F_NOWAIT_READ_BIT),
	/* supports async writes */
	REQ_F_NOWAIT_WRITE	= BIT(REQ_F_NOWAIT_WRITE_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req);

struct io_task_work {
	union {
		struct io_wq_work_node	node;
		struct llist_node	fallback_node;
	};
	io_req_tw_func_t		func;
};

enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_update	poll_update;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_rsrc_update	rsrc_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	atomic_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct io_kiocb			*link;
	struct percpu_ref		*fixed_rsrc_refs;

	/* used with ctx->iopoll_list with reads/writes */
	struct list_head		inflight_entry;
	struct io_task_work		io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
	const struct cred		*creds;

	/* store used ubuf, so we can prevent reloading */
	struct io_mapped_ubuf		*imu;
};

struct io_tctx_node {
	struct list_head	ctx_node;
	struct task_struct	*task;
	struct io_ring_ctx	*ctx;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* do prep async if is going to be punted */
	unsigned		needs_async_setup : 1;
	/* should block plug */
	unsigned		plug : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_connect),
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {},
	[IORING_OP_CLOSE] = {},
	[IORING_OP_FILES_UPDATE] = {},
	[IORING_OP_STATX] = {},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {},
	[IORING_OP_UNLINKAT] = {},
};

static bool io_disarm_next(struct io_kiocb *req);
static void io_uring_del_tctx_node(unsigned long index);
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);
static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);

static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
				 long res, unsigned int cflags);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_dismantle_req(struct io_kiocb *req);
static void io_put_task(struct task_struct *task, int nr);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args);
static void io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_ring_ctx *ctx,
				struct io_submit_state *state,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);

static void io_req_task_queue(struct io_kiocb *req);
static void io_submit_flush_completions(struct io_ring_ctx *ctx);
static bool io_poll_remove_waitqs(struct io_kiocb *req);
static int io_req_prep_async(struct io_kiocb *req);

static void io_fallback_req_func(struct work_struct *unused);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline void io_req_set_rsrc_node(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->fixed_rsrc_refs) {
		req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
		percpu_ref_get(req->fixed_rsrc_refs);
	}
}

static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
{
	bool got = percpu_ref_tryget(ref);

	/* already at zero, wait for ->release() */
	if (!got)
		wait_for_completion(compl);
	percpu_ref_resurrect(ref);
	if (got)
		percpu_ref_put(ref);
}

static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
{
	struct io_kiocb *req;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
}

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	return !req->timeout.off;
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
					GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);

	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
	if (!ctx->dummy_ubuf)
		goto err;
	/* set invalid range, so io_import_fixed() fails meeting it */
	ctx->dummy_ubuf->ubuf = -1UL;

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_sq_wait);
	INIT_LIST_HEAD(&ctx->sqd_list);
	init_waitqueue_head(&ctx->poll_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->ref_comp);
	xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->cq_wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	spin_lock_init(&ctx->rsrc_ref_lock);
	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
	init_llist_head(&ctx->rsrc_put_llist);
	INIT_LIST_HEAD(&ctx->tctx_list);
	INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
	INIT_LIST_HEAD(&ctx->locked_free_list);
	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
	return ctx;
err:
	kfree(ctx->dummy_ubuf);
	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}

static void io_account_cq_overflow(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
	ctx->cq_extra--;
}

static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
	}

	return false;
}

#define FFS_ASYNC_READ		0x1UL
#define FFS_ASYNC_WRITE		0x2UL
#ifdef CONFIG_64BIT
#define FFS_ISREG		0x4UL
#else
#define FFS_ISREG		0x0UL
#endif
#define FFS_MASK		~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE);
}
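
/*
 * Illustrative note, an assumption based on the FFS_* masks above rather
 * than on code shown in this excerpt: the fixed file table packs these flag
 * bits into the low bits of the stored struct file pointer, roughly
 *
 *	file_slot->file_ptr = (unsigned long)file | FFS_ISREG;
 *	file = (struct file *)(file_ptr & FFS_MASK);
 *
 * so a fixed-file lookup can recover both the pointer and the cached
 * "supports nowait read/write" / "is regular file" hints in one load.
 */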

static void io_req_track_inflight(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_INFLIGHT)) {
		req->flags |= REQ_F_INFLIGHT;
		atomic_inc(&current->io_uring->inflight_tracked);
	}
}

static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_CREDS)) {
		req->flags |= REQ_F_CREDS;
		req->creds = get_current_cred();
	}

	req->work.list.next = NULL;
	req->work.flags = 0;
	if (req->flags & REQ_F_FORCE_ASYNC)
		req->work.flags |= IO_WQ_WORK_CONCURRENT;

	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	switch (req->opcode) {
	case IORING_OP_SPLICE:
	case IORING_OP_TEE:
		if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
			req->work.flags |= IO_WQ_WORK_UNBOUND;
		break;
	}
}

static void io_prep_async_link(struct io_kiocb *req)
{
	struct io_kiocb *cur;

	if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->completion_lock);
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
		spin_unlock_irq(&ctx->completion_lock);
	} else {
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
	}
}

static void io_queue_async_work(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link = io_prep_linked_timeout(req);
	struct io_uring_task *tctx = req->task->io_uring;

	BUG_ON(!tctx);
	BUG_ON(!tctx->io_wq);

	/* init ->work of the whole link before punting */
	io_prep_async_link(req);

	/*
	 * Not expected to happen, but if we do have a bug where this _can_
	 * happen, catch it here and ensure the request is marked as
	 * canceled. That will make io-wq go through the usual work cancel
	 * procedure rather than attempt to run this request (or create a new
	 * worker for it).
	 */
	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
		req->work.flags |= IO_WQ_WORK_CANCEL;

	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
					&req->work, req->flags);
	io_wq_enqueue(tctx->io_wq, &req->work);
	if (link)
		io_queue_linked_timeout(link);
}

static void io_kill_timeout(struct io_kiocb *req, int status)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_timeout_data *io = req->async_data;

	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
		list_del_init(&req->timeout.list);
		io_cqring_fill_event(req->ctx, req->user_data, status, 0);
		io_put_req_deferred(req, 1);
	}
}

static void io_queue_deferred(struct io_ring_ctx *ctx)
{
	while (!list_empty(&ctx->defer_list)) {
		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);

		if (req_need_defer(de->req, de->seq))
			break;
		list_del_init(&de->list);
		io_req_task_queue(de->req);
		kfree(de);
	}
}

static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	while (!list_empty(&ctx->timeout_list)) {
		u32 events_needed, events_got;
		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
						struct io_kiocb, timeout.list);

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
		events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		list_del_init(&req->timeout.list);
		io_kill_timeout(req, 0);
	}
	ctx->cq_last_tm_flush = seq;
}

static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (ctx->off_timeout_used)
		io_flush_timeouts(ctx);
	if (ctx->drain_active)
		io_queue_deferred(ctx);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active))
		__io_commit_cqring_flush(ctx);
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
{
	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail, mask = ctx->cq_entries - 1;

	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (__io_cqring_events(ctx) == ctx->cq_entries)
		return NULL;

	tail = ctx->cached_cq_tail++;
	return &rings->cqes[tail & mask];
}

static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
	if (likely(!ctx->cq_ev_fd))
		return false;
	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		return false;
	return !ctx->eventfd_async || io_wq_current_is_worker();
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
	if (waitqueue_active(&ctx->poll_wait)) {
		wake_up_interruptible(&ctx->poll_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (wq_has_sleeper(&ctx->cq_wait))
			wake_up_all(&ctx->cq_wait);
	}
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
	if (waitqueue_active(&ctx->poll_wait)) {
		wake_up_interruptible(&ctx->poll_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

/* Returns true if there are no backlogged entries after the flush */
static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
	unsigned long flags;
	bool all_flushed, posted;

	if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
		return false;

	posted = false;
	spin_lock_irqsave(&ctx->completion_lock, flags);
	while (!list_empty(&ctx->cq_overflow_list)) {
		struct io_uring_cqe *cqe = io_get_cqe(ctx);
		struct io_overflow_cqe *ocqe;

		if (!cqe && !force)
			break;
		ocqe = list_first_entry(&ctx->cq_overflow_list,
					struct io_overflow_cqe, list);
		if (cqe)
			memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
		else
			io_account_cq_overflow(ctx);

		posted = true;
		list_del(&ocqe->list);
		kfree(ocqe);
	}

	all_flushed = list_empty(&ctx->cq_overflow_list);
	if (all_flushed) {
		clear_bit(0, &ctx->check_cq_overflow);
		WRITE_ONCE(ctx->rings->sq_flags,
			   ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
	}

	if (posted)
		io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	if (posted)
		io_cqring_ev_posted(ctx);
	return all_flushed;
}

static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
	bool ret = true;

	if (test_bit(0, &ctx->check_cq_overflow)) {
		/* iopoll syncs against uring_lock, not completion_lock */
		if (ctx->flags & IORING_SETUP_IOPOLL)
			mutex_lock(&ctx->uring_lock);
		ret = __io_cqring_overflow_flush(ctx, force);
		if (ctx->flags & IORING_SETUP_IOPOLL)
			mutex_unlock(&ctx->uring_lock);
	}

	return ret;
}

/*
 * Shamelessly stolen from the mm implementation of page reference checking,
 * see commit f958d7b528b1 for details.
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
{
	return atomic_inc_not_zero(&req->refs);
}

static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_sub_and_test(refs, &req->refs);
}

static inline bool req_ref_put_and_test(struct io_kiocb *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->refs);
}

static inline void req_ref_put(struct io_kiocb *req)
{
	WARN_ON_ONCE(req_ref_put_and_test(req));
}

static inline void req_ref_get(struct io_kiocb *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	atomic_inc(&req->refs);
}

static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
				     long res, unsigned int cflags)
{
	struct io_overflow_cqe *ocqe;

	ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
	if (!ocqe) {
		/*
		 * If we're in ring overflow flush mode, or in task cancel mode,
		 * or cannot allocate an overflow entry, then we need to drop it
		 * on the floor.
		 */
		io_account_cq_overflow(ctx);
		return false;
	}
	if (list_empty(&ctx->cq_overflow_list)) {
		set_bit(0, &ctx->check_cq_overflow);
		WRITE_ONCE(ctx->rings->sq_flags,
			   ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);

	}
	ocqe->cqe.user_data = user_data;
	ocqe->cqe.res = res;
	ocqe->cqe.flags = cflags;
	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
	return true;
}

static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
					  long res, unsigned int cflags)
{
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, user_data, res, cflags);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (likely(cqe)) {
		WRITE_ONCE(cqe->user_data, user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
		return true;
	}
	return io_cqring_event_overflow(ctx, user_data, res, cflags);
}

/* not as hot to bloat with inlining */
static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
					  long res, unsigned int cflags)
{
	return __io_cqring_fill_event(ctx, user_data, res, cflags);
}

static void io_req_complete_post(struct io_kiocb *req, long res,
				 unsigned int cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	__io_cqring_fill_event(ctx, req->user_data, res, cflags);
	/*
	 * If we're the last reference to this request, add to our locked
	 * free_list cache.
	 */
	if (req_ref_put_and_test(req)) {
		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
			if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL))
				io_disarm_next(req);
			if (req->link) {
				io_req_task_queue(req->link);
				req->link = NULL;
			}
		}
		io_dismantle_req(req);
		io_put_task(req->task, 1);
		list_add(&req->compl.list, &ctx->locked_free_list);
		ctx->locked_free_nr++;
	} else {
		if (!percpu_ref_tryget(&ctx->refs))
			req = NULL;
	}
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	if (req) {
		io_cqring_ev_posted(ctx);
		percpu_ref_put(&ctx->refs);
	}
}

static inline bool io_req_needs_clean(struct io_kiocb *req)
{
	return req->flags & IO_REQ_CLEAN_FLAGS;
}

static void io_req_complete_state(struct io_kiocb *req, long res,
				  unsigned int cflags)
{
	if (io_req_needs_clean(req))
		io_clean_op(req);
	req->result = res;
	req->compl.cflags = cflags;
	req->flags |= REQ_F_COMPLETE_INLINE;
}

static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
				     long res, unsigned cflags)
{
	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
		io_req_complete_state(req, res, cflags);
	else
		io_req_complete_post(req, res, cflags);
}

static inline void io_req_complete(struct io_kiocb *req, long res)
{
	__io_req_complete(req, 0, res, 0);
}

static void io_req_complete_failed(struct io_kiocb *req, long res)
{
	req_set_fail(req);
	io_put_req(req);
	io_req_complete_post(req, res, 0);
}

864ea921
PB
1705/*
1706 * Don't initialise the fields below on every allocation, but do that in
1707 * advance and keep them valid across allocations.
1708 */
1709static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1710{
1711 req->ctx = ctx;
1712 req->link = NULL;
1713 req->async_data = NULL;
1714 /* not necessary, but safer to zero */
1715 req->result = 0;
1716}
1717
dac7a098
PB
1718static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
1719 struct io_comp_state *cs)
1720{
1721 spin_lock_irq(&ctx->completion_lock);
d0acdee2
PB
1722 list_splice_init(&ctx->locked_free_list, &cs->free_list);
1723 ctx->locked_free_nr = 0;
dac7a098
PB
1724 spin_unlock_irq(&ctx->completion_lock);
1725}
1726
dd78f492 1727/* Returns true IFF there are requests in the cache */
c7dae4ba 1728static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
0ddf92e8 1729{
c7dae4ba
JA
1730 struct io_submit_state *state = &ctx->submit_state;
1731 struct io_comp_state *cs = &state->comp;
dd78f492 1732 int nr;
0ddf92e8 1733
c7dae4ba
JA
1734 /*
1735 * If we have more than a batch's worth of requests in our IRQ side
1736 * locked cache, grab the lock and move them over to our submission
1737 * side cache.
1738 */
d0acdee2 1739 if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
dac7a098 1740 io_flush_cached_locked_reqs(ctx, cs);
0ddf92e8 1741
dd78f492 1742 nr = state->free_reqs;
c7dae4ba 1743 while (!list_empty(&cs->free_list)) {
dd78f492
PB
1744 struct io_kiocb *req = list_first_entry(&cs->free_list,
1745 struct io_kiocb, compl.list);
1746
1b4c351f 1747 list_del(&req->compl.list);
dd78f492
PB
1748 state->reqs[nr++] = req;
1749 if (nr == ARRAY_SIZE(state->reqs))
e5d1bc0a 1750 break;
1b4c351f
JA
1751 }
1752
dd78f492
PB
1753 state->free_reqs = nr;
1754 return nr != 0;
0ddf92e8
JA
1755}
1756
e5d1bc0a 1757static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
2b188cc1 1758{
e5d1bc0a 1759 struct io_submit_state *state = &ctx->submit_state;
864ea921
PB
1760 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1761 int ret, i;
e5d1bc0a 1762
fe7e3257 1763 BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
e5d1bc0a 1764
864ea921
PB
1765 if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
1766 goto got_req;
e5d1bc0a 1767
864ea921
PB
1768 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1769 state->reqs);
fd6fab2c 1770
864ea921
PB
1771 /*
1772 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1773 * retry single alloc to be on the safe side.
1774 */
1775 if (unlikely(ret <= 0)) {
1776 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1777 if (!state->reqs[0])
1778 return NULL;
1779 ret = 1;
2b188cc1 1780 }
864ea921
PB
1781
1782 for (i = 0; i < ret; i++)
1783 io_preinit_req(state->reqs[i], ctx);
1784 state->free_reqs = ret;
e5d1bc0a 1785got_req:
291b2821
PB
1786 state->free_reqs--;
1787 return state->reqs[state->free_reqs];
2b188cc1
JA
1788}
1789
e1d767f0 1790static inline void io_put_file(struct file *file)
8da11c19 1791{
e1d767f0 1792 if (file)
8da11c19
PB
1793 fput(file);
1794}
1795
4edf20f9 1796static void io_dismantle_req(struct io_kiocb *req)
2b188cc1 1797{
094bae49 1798 unsigned int flags = req->flags;
929a3af9 1799
3a0a6902
PB
1800 if (io_req_needs_clean(req))
1801 io_clean_op(req);
e1d767f0
PB
1802 if (!(flags & REQ_F_FIXED_FILE))
1803 io_put_file(req->file);
269bbe5f
BM
1804 if (req->fixed_rsrc_refs)
1805 percpu_ref_put(req->fixed_rsrc_refs);
99ebe4ef 1806 if (req->async_data) {
094bae49 1807 kfree(req->async_data);
99ebe4ef
PB
1808 req->async_data = NULL;
1809 }
e65ef56d
JA
1810}
1811
b23fcf47 1812 /* must be called shortly after putting a request */
7c660731
PB
1813static inline void io_put_task(struct task_struct *task, int nr)
1814{
1815 struct io_uring_task *tctx = task->io_uring;
1816
1817 percpu_counter_sub(&tctx->inflight, nr);
1818 if (unlikely(atomic_read(&tctx->in_idle)))
1819 wake_up(&tctx->wait);
1820 put_task_struct_many(task, nr);
1821}
1822
216578e5 1823static void __io_free_req(struct io_kiocb *req)
c6ca97b3 1824{
51a4cc11 1825 struct io_ring_ctx *ctx = req->ctx;
c6ca97b3 1826
216578e5 1827 io_dismantle_req(req);
7c660731 1828 io_put_task(req->task, 1);
c6ca97b3 1829
3893f39f 1830 kmem_cache_free(req_cachep, req);
ecfc5177 1831 percpu_ref_put(&ctx->refs);
e65ef56d
JA
1832}
1833
f2f87370
PB
1834static inline void io_remove_next_linked(struct io_kiocb *req)
1835{
1836 struct io_kiocb *nxt = req->link;
1837
1838 req->link = nxt->link;
1839 nxt->link = NULL;
1840}
1841
33cc89a9
PB
1842static bool io_kill_linked_timeout(struct io_kiocb *req)
1843 __must_hold(&req->ctx->completion_lock)
2665abfd 1844{
33cc89a9 1845 struct io_kiocb *link = req->link;
f2f87370 1846
900fad45
PB
1847 /*
 1848	 * Can happen if a linked timeout fired and the link chain looked like
1849 * req -> link t-out -> link t-out [-> ...]
1850 */
c9abd7ad
PB
1851 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
1852 struct io_timeout_data *io = link->async_data;
7c86ffee 1853
f2f87370 1854 io_remove_next_linked(req);
90cd7e42 1855 link->timeout.head = NULL;
fd9c7bc5 1856 if (hrtimer_try_to_cancel(&io->timer) != -1) {
d4d19c19
PB
1857 io_cqring_fill_event(link->ctx, link->user_data,
1858 -ECANCELED, 0);
33cc89a9 1859 io_put_req_deferred(link, 1);
d4729fbd 1860 return true;
c9abd7ad
PB
1861 }
1862 }
d4729fbd 1863 return false;
7c86ffee
PB
1864}
1865
d148ca4b 1866static void io_fail_links(struct io_kiocb *req)
33cc89a9 1867 __must_hold(&req->ctx->completion_lock)
9e645e11 1868{
33cc89a9 1869 struct io_kiocb *nxt, *link = req->link;
9e645e11 1870
f2f87370 1871 req->link = NULL;
f2f87370
PB
1872 while (link) {
1873 nxt = link->link;
1874 link->link = NULL;
2665abfd 1875
f2f87370 1876 trace_io_uring_fail_link(req, link);
d4d19c19 1877 io_cqring_fill_event(link->ctx, link->user_data, -ECANCELED, 0);
1575f21a 1878 io_put_req_deferred(link, 2);
f2f87370 1879 link = nxt;
9e645e11 1880 }
33cc89a9 1881}
9e645e11 1882
33cc89a9
PB
1883static bool io_disarm_next(struct io_kiocb *req)
1884 __must_hold(&req->ctx->completion_lock)
1885{
1886 bool posted = false;
1887
1888 if (likely(req->flags & REQ_F_LINK_TIMEOUT))
1889 posted = io_kill_linked_timeout(req);
93d2bcd2 1890 if (unlikely((req->flags & REQ_F_FAIL) &&
e4335ed3 1891 !(req->flags & REQ_F_HARDLINK))) {
33cc89a9
PB
1892 posted |= (req->link != NULL);
1893 io_fail_links(req);
1894 }
1895 return posted;
9e645e11
JA
1896}
1897
3fa5e0f3 1898static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
c69f8dbe 1899{
33cc89a9 1900 struct io_kiocb *nxt;
944e58bf 1901
9e645e11
JA
1902 /*
1903 * If LINK is set, we have dependent requests in this chain. If we
1904 * didn't fail this request, queue the first one up, moving any other
1905 * dependencies to the next request. In case of failure, fail the rest
1906 * of the chain.
1907 */
93d2bcd2 1908 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL)) {
33cc89a9
PB
1909 struct io_ring_ctx *ctx = req->ctx;
1910 unsigned long flags;
1911 bool posted;
1912
1913 spin_lock_irqsave(&ctx->completion_lock, flags);
1914 posted = io_disarm_next(req);
1915 if (posted)
1916 io_commit_cqring(req->ctx);
1917 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1918 if (posted)
1919 io_cqring_ev_posted(ctx);
f2f87370 1920 }
33cc89a9
PB
1921 nxt = req->link;
1922 req->link = NULL;
1923 return nxt;
4d7dd462 1924}
9e645e11 1925
f2f87370 1926static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
3fa5e0f3 1927{
cdbff982 1928 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
3fa5e0f3
PB
1929 return NULL;
1930 return __io_req_find_next(req);
1931}
1932
2c32395d
PB
1933static void ctx_flush_and_put(struct io_ring_ctx *ctx)
1934{
1935 if (!ctx)
1936 return;
1937 if (ctx->submit_state.comp.nr) {
1938 mutex_lock(&ctx->uring_lock);
2a2758f2 1939 io_submit_flush_completions(ctx);
2c32395d
PB
1940 mutex_unlock(&ctx->uring_lock);
1941 }
1942 percpu_ref_put(&ctx->refs);
1943}
1944
7cbf1722 1945static void tctx_task_work(struct callback_head *cb)
c40f6379 1946{
ebd0df2e 1947 struct io_ring_ctx *ctx = NULL;
3f18407d
PB
1948 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
1949 task_work);
c40f6379 1950
16f72070 1951 while (1) {
3f18407d
PB
1952 struct io_wq_work_node *node;
1953
1954 spin_lock_irq(&tctx->task_lock);
c6538be9 1955 node = tctx->task_list.first;
3f18407d
PB
1956 INIT_WQ_LIST(&tctx->task_list);
1957 spin_unlock_irq(&tctx->task_lock);
1958
3f18407d
PB
1959 while (node) {
1960 struct io_wq_work_node *next = node->next;
1961 struct io_kiocb *req = container_of(node, struct io_kiocb,
1962 io_task_work.node);
1963
1964 if (req->ctx != ctx) {
1965 ctx_flush_and_put(ctx);
1966 ctx = req->ctx;
1967 percpu_ref_get(&ctx->refs);
1968 }
5b0a6acc 1969 req->io_task_work.func(req);
3f18407d
PB
1970 node = next;
1971 }
7a778f9d 1972 if (wq_list_empty(&tctx->task_list)) {
110aa25c 1973 spin_lock_irq(&tctx->task_lock);
7a778f9d 1974 clear_bit(0, &tctx->task_state);
110aa25c
JA
1975 if (wq_list_empty(&tctx->task_list)) {
1976 spin_unlock_irq(&tctx->task_lock);
7a778f9d 1977 break;
110aa25c
JA
1978 }
1979 spin_unlock_irq(&tctx->task_lock);
7a778f9d
PB
1980 /* another tctx_task_work() is enqueued, yield */
1981 if (test_and_set_bit(0, &tctx->task_state))
1982 break;
1983 }
7cbf1722 1984 cond_resched();
3f18407d 1985 }
ebd0df2e
PB
1986
1987 ctx_flush_and_put(ctx);
7cbf1722
JA
1988}
1989
e09ee510 1990static void io_req_task_work_add(struct io_kiocb *req)
7cbf1722 1991{
c15b79de 1992 struct task_struct *tsk = req->task;
7cbf1722 1993 struct io_uring_task *tctx = tsk->io_uring;
c15b79de 1994 enum task_work_notify_mode notify;
e09ee510 1995 struct io_wq_work_node *node;
0b81e80c 1996 unsigned long flags;
7cbf1722
JA
1997
1998 WARN_ON_ONCE(!tctx);
1999
0b81e80c 2000 spin_lock_irqsave(&tctx->task_lock, flags);
7cbf1722 2001 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
0b81e80c 2002 spin_unlock_irqrestore(&tctx->task_lock, flags);
7cbf1722
JA
2003
2004 /* task_work already pending, we're done */
2005 if (test_bit(0, &tctx->task_state) ||
2006 test_and_set_bit(0, &tctx->task_state))
e09ee510 2007 return;
7cbf1722 2008
c15b79de
PB
2009 /*
2010 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2011 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2012 * processing task_work. There's no reliable way to tell if TWA_RESUME
2013 * will do the job.
2014 */
2015 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
c15b79de
PB
2016 if (!task_work_add(tsk, &tctx->task_work, notify)) {
2017 wake_up_process(tsk);
e09ee510 2018 return;
c15b79de 2019 }
2215bed9 2020
e09ee510 2021 clear_bit(0, &tctx->task_state);
0b81e80c 2022 spin_lock_irqsave(&tctx->task_lock, flags);
e09ee510
PB
2023 node = tctx->task_list.first;
2024 INIT_WQ_LIST(&tctx->task_list);
0b81e80c 2025 spin_unlock_irqrestore(&tctx->task_lock, flags);
7cbf1722 2026
e09ee510
PB
2027 while (node) {
2028 req = container_of(node, struct io_kiocb, io_task_work.node);
2029 node = node->next;
2030 if (llist_add(&req->io_task_work.fallback_node,
2031 &req->ctx->fallback_llist))
2032 schedule_delayed_work(&req->ctx->fallback_work, 1);
2033 }
eab30c4d
PB
2034}
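/*
 * Illustrative sketch (annotation, not part of the kernel source): the
 * task_work hand-off implemented above, assuming the target task is alive:
 *
 *	req->io_task_work.func = io_req_task_submit;	// or another handler
 *	io_req_task_work_add(req);
 *	  -> wq_list_add_tail(..., &tctx->task_list);
 *	  -> task_work_add(tsk, &tctx->task_work, notify);
 *	     // notify is TWA_NONE for SQPOLL, TWA_SIGNAL otherwise
 *	  -> tctx_task_work() later runs each queued ->func(req)
 *
 * If task_work_add() fails (the task is exiting), the queued requests are
 * drained onto ctx->fallback_llist and run from io_fallback_req_func()
 * via delayed work instead.
 */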
2035
5b0a6acc 2036static void io_req_task_cancel(struct io_kiocb *req)
c40f6379 2037{
87ceb6a6 2038 struct io_ring_ctx *ctx = req->ctx;
c40f6379 2039
e83acd7d 2040 /* ctx is guaranteed to stay alive while we hold uring_lock */
792bb6eb 2041 mutex_lock(&ctx->uring_lock);
2593553a 2042 io_req_complete_failed(req, req->result);
792bb6eb 2043 mutex_unlock(&ctx->uring_lock);
c40f6379
JA
2044}
2045
5b0a6acc 2046static void io_req_task_submit(struct io_kiocb *req)
c40f6379
JA
2047{
2048 struct io_ring_ctx *ctx = req->ctx;
2049
04fc6c80 2050	 /* ctx stays valid until unlock, even if we drop all our ctx->refs */
81b6d05c 2051 mutex_lock(&ctx->uring_lock);
9c688260 2052 if (!(req->task->flags & PF_EXITING) && !req->task->in_execve)
c5eef2b9 2053 __io_queue_sqe(req);
81b6d05c 2054 else
2593553a 2055 io_req_complete_failed(req, -EFAULT);
81b6d05c 2056 mutex_unlock(&ctx->uring_lock);
c40f6379
JA
2057}
2058
2c4b8eb6 2059static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
c40f6379 2060{
2c4b8eb6 2061 req->result = ret;
5b0a6acc 2062 req->io_task_work.func = io_req_task_cancel;
e09ee510 2063 io_req_task_work_add(req);
c40f6379
JA
2064}
2065
2c4b8eb6 2066static void io_req_task_queue(struct io_kiocb *req)
a3df7698 2067{
5b0a6acc 2068 req->io_task_work.func = io_req_task_submit;
e09ee510 2069 io_req_task_work_add(req);
a3df7698
PB
2070}
2071
773af691
JA
2072static void io_req_task_queue_reissue(struct io_kiocb *req)
2073{
2074 req->io_task_work.func = io_queue_async_work;
2075 io_req_task_work_add(req);
2076}
2077
f2f87370 2078static inline void io_queue_next(struct io_kiocb *req)
c69f8dbe 2079{
9b5f7bd9 2080 struct io_kiocb *nxt = io_req_find_next(req);
944e58bf
PB
2081
2082 if (nxt)
906a8c3f 2083 io_req_task_queue(nxt);
c69f8dbe
JL
2084}
2085
c3524383 2086static void io_free_req(struct io_kiocb *req)
7a743e22 2087{
c3524383
PB
2088 io_queue_next(req);
2089 __io_free_req(req);
2090}
8766dd51 2091
2d6500d4 2092struct req_batch {
5af1d13e
PB
2093 struct task_struct *task;
2094 int task_refs;
1b4c351f 2095 int ctx_refs;
2d6500d4
PB
2096};
2097
5af1d13e
PB
2098static inline void io_init_req_batch(struct req_batch *rb)
2099{
5af1d13e 2100 rb->task_refs = 0;
9ae72463 2101 rb->ctx_refs = 0;
5af1d13e
PB
2102 rb->task = NULL;
2103}
2104
2d6500d4
PB
2105static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2106 struct req_batch *rb)
2107{
6e833d53 2108 if (rb->task)
7c660731 2109 io_put_task(rb->task, rb->task_refs);
9ae72463
PB
2110 if (rb->ctx_refs)
2111 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
2d6500d4
PB
2112}
2113
6ff119a6
PB
2114static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2115 struct io_submit_state *state)
2d6500d4 2116{
f2f87370 2117 io_queue_next(req);
96670657 2118 io_dismantle_req(req);
2d6500d4 2119
e3bc8e9d 2120 if (req->task != rb->task) {
7c660731
PB
2121 if (rb->task)
2122 io_put_task(rb->task, rb->task_refs);
e3bc8e9d
JA
2123 rb->task = req->task;
2124 rb->task_refs = 0;
5af1d13e 2125 }
e3bc8e9d 2126 rb->task_refs++;
9ae72463 2127 rb->ctx_refs++;
5af1d13e 2128
bd759045 2129 if (state->free_reqs != ARRAY_SIZE(state->reqs))
6ff119a6 2130 state->reqs[state->free_reqs++] = req;
bd759045
PB
2131 else
2132 list_add(&req->compl.list, &state->comp.free_list);
7a743e22
PB
2133}
2134
2a2758f2 2135static void io_submit_flush_completions(struct io_ring_ctx *ctx)
282cdc86 2136 __must_hold(&req->ctx->uring_lock)
905c172f 2137{
2a2758f2 2138 struct io_comp_state *cs = &ctx->submit_state.comp;
905c172f 2139 int i, nr = cs->nr;
905c172f
PB
2140 struct req_batch rb;
2141
905c172f
PB
2142 spin_lock_irq(&ctx->completion_lock);
2143 for (i = 0; i < nr; i++) {
5182ed2e
PB
2144 struct io_kiocb *req = cs->reqs[i];
2145
d4d19c19
PB
2146 __io_cqring_fill_event(ctx, req->user_data, req->result,
2147 req->compl.cflags);
905c172f
PB
2148 }
2149 io_commit_cqring(ctx);
2150 spin_unlock_irq(&ctx->completion_lock);
905c172f 2151 io_cqring_ev_posted(ctx);
5182ed2e
PB
2152
2153 io_init_req_batch(&rb);
905c172f 2154 for (i = 0; i < nr; i++) {
5182ed2e 2155 struct io_kiocb *req = cs->reqs[i];
905c172f
PB
2156
2157 /* submission and completion refs */
de9b4cca 2158 if (req_ref_sub_and_test(req, 2))
6ff119a6 2159 io_req_free_batch(&rb, req, &ctx->submit_state);
905c172f
PB
2160 }
2161
2162 io_req_free_batch_finish(ctx, &rb);
2163 cs->nr = 0;
7a743e22
PB
2164}
2165
ba816ad6
JA
2166/*
2167 * Drop reference to request, return next in chain (if there is one) if this
2168 * was the last reference to this request.
2169 */
0d85035a 2170static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
e65ef56d 2171{
9b5f7bd9
PB
2172 struct io_kiocb *nxt = NULL;
2173
de9b4cca 2174 if (req_ref_put_and_test(req)) {
9b5f7bd9 2175 nxt = io_req_find_next(req);
4d7dd462 2176 __io_free_req(req);
2a44f467 2177 }
9b5f7bd9 2178 return nxt;
2b188cc1
JA
2179}
2180
0d85035a 2181static inline void io_put_req(struct io_kiocb *req)
e65ef56d 2182{
de9b4cca 2183 if (req_ref_put_and_test(req))
e65ef56d 2184 io_free_req(req);
2b188cc1
JA
2185}
2186
216578e5
PB
2187static void io_free_req_deferred(struct io_kiocb *req)
2188{
5b0a6acc 2189 req->io_task_work.func = io_free_req;
e09ee510 2190 io_req_task_work_add(req);
216578e5
PB
2191}
2192
2193static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2194{
de9b4cca 2195 if (req_ref_sub_and_test(req, refs))
216578e5
PB
2196 io_free_req_deferred(req);
2197}
2198
6c503150 2199static unsigned io_cqring_events(struct io_ring_ctx *ctx)
a3a0e43f
JA
2200{
2201 /* See comment at the top of this file */
2202 smp_rmb();
e23de15f 2203 return __io_cqring_events(ctx);
a3a0e43f
JA
2204}
2205
fb5ccc98
PB
2206static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2207{
2208 struct io_rings *rings = ctx->rings;
2209
2210 /* make sure SQ entry isn't read before tail */
2211 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2212}
2213
8ff069bf 2214static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
e94f141b 2215{
8ff069bf 2216 unsigned int cflags;
e94f141b 2217
bcda7baa
JA
2218 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2219 cflags |= IORING_CQE_F_BUFFER;
0e1b6fe3 2220 req->flags &= ~REQ_F_BUFFER_SELECTED;
bcda7baa
JA
2221 kfree(kbuf);
2222 return cflags;
e94f141b
JA
2223}
2224
8ff069bf 2225static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
bcda7baa 2226{
4d954c25 2227 struct io_buffer *kbuf;
bcda7baa 2228
4d954c25 2229 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
8ff069bf
PB
2230 return io_put_kbuf(req, kbuf);
2231}
2232
4c6e277c
JA
2233static inline bool io_run_task_work(void)
2234{
ef98eb04 2235 if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
4c6e277c 2236 __set_current_state(TASK_RUNNING);
ef98eb04 2237 tracehook_notify_signal();
4c6e277c
JA
2238 return true;
2239 }
2240
2241 return false;
bcda7baa
JA
2242}
2243
def596e9
JA
2244/*
2245 * Find and free completed poll iocbs
2246 */
2247static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
3c30ef0f 2248 struct list_head *done, bool resubmit)
def596e9 2249{
8237e045 2250 struct req_batch rb;
def596e9 2251 struct io_kiocb *req;
bbde017a
XW
2252
2253 /* order with ->result store in io_complete_rw_iopoll() */
2254 smp_rmb();
def596e9 2255
5af1d13e 2256 io_init_req_batch(&rb);
def596e9 2257 while (!list_empty(done)) {
bcda7baa
JA
2258 int cflags = 0;
2259
d21ffe7e 2260 req = list_first_entry(done, struct io_kiocb, inflight_entry);
f161340d
PB
2261 list_del(&req->inflight_entry);
2262
3c30ef0f 2263 if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
8c130827 2264 !(req->flags & REQ_F_DONT_REISSUE)) {
bbde017a 2265 req->iopoll_completed = 0;
8c130827 2266 req_ref_get(req);
773af691 2267 io_req_task_queue_reissue(req);
8c130827 2268 continue;
bbde017a 2269 }
def596e9 2270
bcda7baa 2271 if (req->flags & REQ_F_BUFFER_SELECTED)
8ff069bf 2272 cflags = io_put_rw_kbuf(req);
bcda7baa 2273
d4d19c19 2274 __io_cqring_fill_event(ctx, req->user_data, req->result, cflags);
def596e9
JA
2275 (*nr_events)++;
2276
de9b4cca 2277 if (req_ref_put_and_test(req))
6ff119a6 2278 io_req_free_batch(&rb, req, &ctx->submit_state);
def596e9 2279 }
def596e9 2280
09bb8394 2281 io_commit_cqring(ctx);
80c18e4a 2282 io_cqring_ev_posted_iopoll(ctx);
2d6500d4 2283 io_req_free_batch_finish(ctx, &rb);
581f9810
BM
2284}
2285
def596e9 2286static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
3c30ef0f 2287 long min, bool resubmit)
def596e9
JA
2288{
2289 struct io_kiocb *req, *tmp;
2290 LIST_HEAD(done);
2291 bool spin;
def596e9
JA
2292
2293 /*
2294 * Only spin for completions if we don't have multiple devices hanging
2295 * off our complete list, and we're under the requested amount.
2296 */
915b3dde 2297 spin = !ctx->poll_multi_queue && *nr_events < min;
def596e9 2298
d21ffe7e 2299 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
9adbd45d 2300 struct kiocb *kiocb = &req->rw.kiocb;
a2416e1e 2301 int ret;
def596e9
JA
2302
2303 /*
581f9810
BM
2304 * Move completed and retryable entries to our local lists.
2305 * If we find a request that requires polling, break out
2306 * and complete those lists first, if we have entries there.
def596e9 2307 */
65a6543d 2308 if (READ_ONCE(req->iopoll_completed)) {
d21ffe7e 2309 list_move_tail(&req->inflight_entry, &done);
def596e9
JA
2310 continue;
2311 }
2312 if (!list_empty(&done))
2313 break;
2314
2315 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
a2416e1e
PB
2316 if (unlikely(ret < 0))
2317 return ret;
2318 else if (ret)
2319 spin = false;
def596e9 2320
3aadc23e
PB
2321 /* iopoll may have completed current req */
2322 if (READ_ONCE(req->iopoll_completed))
d21ffe7e 2323 list_move_tail(&req->inflight_entry, &done);
def596e9
JA
2324 }
2325
2326 if (!list_empty(&done))
3c30ef0f 2327 io_iopoll_complete(ctx, nr_events, &done, resubmit);
def596e9 2328
a2416e1e 2329 return 0;
def596e9
JA
2330}
2331
def596e9
JA
2332/*
2333 * We can't just wait for polled events to come to us, we have to actively
2334 * find and complete them.
2335 */
b2edc0a7 2336static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
def596e9
JA
2337{
2338 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2339 return;
2340
2341 mutex_lock(&ctx->uring_lock);
540e32a0 2342 while (!list_empty(&ctx->iopoll_list)) {
def596e9
JA
2343 unsigned int nr_events = 0;
2344
3c30ef0f 2345 io_do_iopoll(ctx, &nr_events, 0, false);
08f5439f 2346
b2edc0a7
PB
 2347	 /* let it sleep and repeat later if we can't complete a request */
2348 if (nr_events == 0)
2349 break;
08f5439f
JA
2350 /*
 2351	 * Ensure we allow local-to-the-cpu processing to take place;
 2352	 * in this case we need to ensure that we reap all events.
3fcee5a6 2353	 * Also let task_work, etc., make progress by releasing the mutex
08f5439f 2354 */
3fcee5a6
PB
2355 if (need_resched()) {
2356 mutex_unlock(&ctx->uring_lock);
2357 cond_resched();
2358 mutex_lock(&ctx->uring_lock);
2359 }
def596e9
JA
2360 }
2361 mutex_unlock(&ctx->uring_lock);
2362}
2363
7668b92a 2364static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
def596e9 2365{
7668b92a 2366 unsigned int nr_events = 0;
e9979b36 2367 int ret = 0;
500f9fba 2368
c7849be9
XW
2369 /*
2370 * We disallow the app entering submit/complete with polling, but we
2371 * still need to lock the ring to prevent racing with polled issue
2372 * that got punted to a workqueue.
2373 */
2374 mutex_lock(&ctx->uring_lock);
f39c8a5b
PB
2375 /*
2376 * Don't enter poll loop if we already have events pending.
2377 * If we do, we can potentially be spinning for commands that
2378 * already triggered a CQE (eg in error).
2379 */
5ed7a37d 2380 if (test_bit(0, &ctx->check_cq_overflow))
f39c8a5b
PB
2381 __io_cqring_overflow_flush(ctx, false);
2382 if (io_cqring_events(ctx))
2383 goto out;
def596e9 2384 do {
500f9fba
JA
2385 /*
2386 * If a submit got punted to a workqueue, we can have the
2387 * application entering polling for a command before it gets
2388 * issued. That app will hold the uring_lock for the duration
2389 * of the poll right here, so we need to take a breather every
2390 * now and then to ensure that the issue has a chance to add
2391 * the poll to the issued list. Otherwise we can spin here
2392 * forever, while the workqueue is stuck trying to acquire the
2393 * very same mutex.
2394 */
e9979b36 2395 if (list_empty(&ctx->iopoll_list)) {
8f487ef2
PB
2396 u32 tail = ctx->cached_cq_tail;
2397
500f9fba 2398 mutex_unlock(&ctx->uring_lock);
4c6e277c 2399 io_run_task_work();
500f9fba 2400 mutex_lock(&ctx->uring_lock);
def596e9 2401
8f487ef2
PB
2402 /* some requests don't go through iopoll_list */
2403 if (tail != ctx->cached_cq_tail ||
2404 list_empty(&ctx->iopoll_list))
e9979b36 2405 break;
500f9fba 2406 }
3c30ef0f 2407 ret = io_do_iopoll(ctx, &nr_events, min, true);
f39c8a5b
PB
2408 } while (!ret && nr_events < min && !need_resched());
2409out:
500f9fba 2410 mutex_unlock(&ctx->uring_lock);
def596e9
JA
2411 return ret;
2412}
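/*
 * Illustrative sketch (annotation, not part of the kernel source): a
 * simplified view of the IOPOLL reaping loop in io_iopoll_check() above:
 *
 *	do {
 *		if (list_empty(&ctx->iopoll_list)) {
 *			// drop uring_lock, run task_work, re-take the lock,
 *			// and stop if nothing new was queued meanwhile
 *		}
 *		ret = io_do_iopoll(ctx, &nr_events, min, true);
 *	} while (!ret && nr_events < min && !need_resched());
 *
 * io_do_iopoll() calls each file's ->iopoll() to reap completions and only
 * lets it spin while all pending requests target one queue
 * (!ctx->poll_multi_queue) and fewer than 'min' events have been found.
 */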
2413
491381ce 2414static void kiocb_end_write(struct io_kiocb *req)
2b188cc1 2415{
491381ce
JA
2416 /*
2417 * Tell lockdep we inherited freeze protection from submission
2418 * thread.
2419 */
2420 if (req->flags & REQ_F_ISREG) {
1c98679d 2421 struct super_block *sb = file_inode(req->file)->i_sb;
2b188cc1 2422
1c98679d
PB
2423 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
2424 sb_end_write(sb);
2b188cc1
JA
2425 }
2426}
2427
b63534c4 2428#ifdef CONFIG_BLOCK
dc2a6e9a 2429static bool io_resubmit_prep(struct io_kiocb *req)
b63534c4 2430{
ab454438 2431 struct io_async_rw *rw = req->async_data;
b63534c4 2432
ab454438
PB
2433 if (!rw)
2434 return !io_req_prep_async(req);
2435 /* may have left rw->iter inconsistent on -EIOCBQUEUED */
2436 iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
2437 return true;
b63534c4 2438}
b63534c4 2439
3e6a0d3c 2440static bool io_rw_should_reissue(struct io_kiocb *req)
b63534c4 2441{
355afaeb 2442 umode_t mode = file_inode(req->file)->i_mode;
3e6a0d3c 2443 struct io_ring_ctx *ctx = req->ctx;
b63534c4 2444
355afaeb
JA
2445 if (!S_ISBLK(mode) && !S_ISREG(mode))
2446 return false;
3e6a0d3c
JA
2447 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2448 !(ctx->flags & IORING_SETUP_IOPOLL)))
b63534c4 2449 return false;
7c977a58
JA
2450 /*
2451 * If ref is dying, we might be running poll reap from the exit work.
2452 * Don't attempt to reissue from that path, just let it fail with
2453 * -EAGAIN.
2454 */
3e6a0d3c
JA
2455 if (percpu_ref_is_dying(&ctx->refs))
2456 return false;
ef046888
JA
2457 /*
 2458	 * Play it safe and assume it's not safe to re-import and reissue if we're
2459 * not in the original thread group (or in task context).
2460 */
2461 if (!same_thread_group(req->task, current) || !in_task())
2462 return false;
3e6a0d3c
JA
2463 return true;
2464}
e82ad485 2465#else
a1ff1e3f 2466static bool io_resubmit_prep(struct io_kiocb *req)
e82ad485
JA
2467{
2468 return false;
2469}
e82ad485 2470static bool io_rw_should_reissue(struct io_kiocb *req)
3e6a0d3c 2471{
b63534c4
JA
2472 return false;
2473}
3e6a0d3c 2474#endif
b63534c4 2475
9011bf9a
PB
2476static void io_fallback_req_func(struct work_struct *work)
2477{
2478 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
2479 fallback_work.work);
2480 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
2481 struct io_kiocb *req, *tmp;
2482
9cb0073b 2483 percpu_ref_get(&ctx->refs);
5b0a6acc
PB
2484 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
2485 req->io_task_work.func(req);
9cb0073b 2486 percpu_ref_put(&ctx->refs);
9011bf9a
PB
2487}
2488
a1d7c393 2489static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
889fca73 2490 unsigned int issue_flags)
a1d7c393 2491{
2f8e45f1
PB
2492 int cflags = 0;
2493
b65c128f
PB
2494 if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2495 kiocb_end_write(req);
9532b99b
PB
2496 if (res != req->result) {
2497 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
2498 io_rw_should_reissue(req)) {
2499 req->flags |= REQ_F_REISSUE;
2500 return;
2501 }
93d2bcd2 2502 req_set_fail(req);
9532b99b 2503 }
2f8e45f1
PB
2504 if (req->flags & REQ_F_BUFFER_SELECTED)
2505 cflags = io_put_rw_kbuf(req);
2506 __io_req_complete(req, issue_flags, res, cflags);
ba816ad6
JA
2507}
2508
2509static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2510{
9adbd45d 2511 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
ba816ad6 2512
889fca73 2513 __io_complete_rw(req, res, res2, 0);
2b188cc1
JA
2514}
2515
def596e9
JA
2516static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2517{
9adbd45d 2518 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
def596e9 2519
491381ce
JA
2520 if (kiocb->ki_flags & IOCB_WRITE)
2521 kiocb_end_write(req);
9532b99b 2522 if (unlikely(res != req->result)) {
a1ff1e3f
JA
2523 if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
2524 io_resubmit_prep(req))) {
93d2bcd2 2525 req_set_fail(req);
9532b99b
PB
2526 req->flags |= REQ_F_DONT_REISSUE;
2527 }
8c130827 2528 }
bbde017a
XW
2529
2530 WRITE_ONCE(req->result, res);
b9b0e0d3 2531 /* order with io_iopoll_complete() checking ->result */
cd664b0e
PB
2532 smp_wmb();
2533 WRITE_ONCE(req->iopoll_completed, 1);
def596e9
JA
2534}
2535
2536/*
2537 * After the iocb has been issued, it's safe to be found on the poll list.
2538 * Adding the kiocb to the list AFTER submission ensures that we don't
f39c8a5b 2539	 * find it from an io_do_iopoll() thread before the issuer is done
def596e9
JA
2540 * accessing the kiocb cookie.
2541 */
cb3d8972 2542static void io_iopoll_req_issued(struct io_kiocb *req)
def596e9
JA
2543{
2544 struct io_ring_ctx *ctx = req->ctx;
cb3d8972
PB
2545 const bool in_async = io_wq_current_is_worker();
2546
2547 /* workqueue context doesn't hold uring_lock, grab it now */
2548 if (unlikely(in_async))
2549 mutex_lock(&ctx->uring_lock);
def596e9
JA
2550
2551 /*
2552 * Track whether we have multiple files in our lists. This will impact
2553 * how we do polling eventually, not spinning if we're on potentially
2554 * different devices.
2555 */
540e32a0 2556 if (list_empty(&ctx->iopoll_list)) {
915b3dde
HX
2557 ctx->poll_multi_queue = false;
2558 } else if (!ctx->poll_multi_queue) {
def596e9 2559 struct io_kiocb *list_req;
915b3dde 2560 unsigned int queue_num0, queue_num1;
def596e9 2561
540e32a0 2562 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
d21ffe7e 2563 inflight_entry);
915b3dde
HX
2564
2565 if (list_req->file != req->file) {
2566 ctx->poll_multi_queue = true;
2567 } else {
2568 queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
2569 queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
2570 if (queue_num0 != queue_num1)
2571 ctx->poll_multi_queue = true;
2572 }
def596e9
JA
2573 }
2574
2575 /*
2576 * For fast devices, IO may have already completed. If it has, add
2577 * it to the front so we find it first.
2578 */
65a6543d 2579 if (READ_ONCE(req->iopoll_completed))
d21ffe7e 2580 list_add(&req->inflight_entry, &ctx->iopoll_list);
def596e9 2581 else
d21ffe7e 2582 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
bdcd3eab 2583
cb3d8972
PB
2584 if (unlikely(in_async)) {
2585 /*
 2586		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
 2587		 * in sq thread task context or in io worker task context. If
 2588		 * the current task context is the sq thread, we don't need to
 2589		 * check whether we should wake up the sq thread.
2590 */
2591 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2592 wq_has_sleeper(&ctx->sq_data->wait))
2593 wake_up(&ctx->sq_data->wait);
2594
2595 mutex_unlock(&ctx->uring_lock);
2596 }
def596e9
JA
2597}
2598
9f13c35b
PB
2599static inline void io_state_file_put(struct io_submit_state *state)
2600{
02b23a9a
PB
2601 if (state->file_refs) {
2602 fput_many(state->file, state->file_refs);
2603 state->file_refs = 0;
2604 }
9a56a232
JA
2605}
2606
2607/*
2608 * Get as many references to a file as we have IOs left in this submission,
2609 * assuming most submissions are for one file, or at least that each file
2610 * has more than one submission.
2611 */
8da11c19 2612static struct file *__io_file_get(struct io_submit_state *state, int fd)
9a56a232
JA
2613{
2614 if (!state)
2615 return fget(fd);
2616
6e1271e6 2617 if (state->file_refs) {
9a56a232 2618 if (state->fd == fd) {
6e1271e6 2619 state->file_refs--;
9a56a232
JA
2620 return state->file;
2621 }
02b23a9a 2622 io_state_file_put(state);
9a56a232
JA
2623 }
2624 state->file = fget_many(fd, state->ios_left);
6e1271e6 2625 if (unlikely(!state->file))
9a56a232
JA
2626 return NULL;
2627
2628 state->fd = fd;
6e1271e6 2629 state->file_refs = state->ios_left - 1;
9a56a232
JA
2630 return state->file;
2631}
2632
4503b767
JA
2633static bool io_bdev_nowait(struct block_device *bdev)
2634{
9ba0d0c8 2635 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
4503b767
JA
2636}
2637
2b188cc1
JA
2638/*
2639 * If we tracked the file through the SCM inflight mechanism, we could support
2640 * any file. For now, just ensure that anything potentially problematic is done
2641 * inline.
2642 */
b191e2df 2643static bool __io_file_supports_nowait(struct file *file, int rw)
2b188cc1
JA
2644{
2645 umode_t mode = file_inode(file)->i_mode;
2646
4503b767 2647 if (S_ISBLK(mode)) {
4e7b5671
CH
2648 if (IS_ENABLED(CONFIG_BLOCK) &&
2649 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
4503b767
JA
2650 return true;
2651 return false;
2652 }
976517f1 2653 if (S_ISSOCK(mode))
2b188cc1 2654 return true;
4503b767 2655 if (S_ISREG(mode)) {
4e7b5671
CH
2656 if (IS_ENABLED(CONFIG_BLOCK) &&
2657 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
4503b767
JA
2658 file->f_op != &io_uring_fops)
2659 return true;
2660 return false;
2661 }
2b188cc1 2662
c5b85625
JA
2663 /* any ->read/write should understand O_NONBLOCK */
2664 if (file->f_flags & O_NONBLOCK)
2665 return true;
2666
af197f50
JA
2667 if (!(file->f_mode & FMODE_NOWAIT))
2668 return false;
2669
2670 if (rw == READ)
2671 return file->f_op->read_iter != NULL;
2672
2673 return file->f_op->write_iter != NULL;
2b188cc1
JA
2674}
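/*
 * Illustrative sketch (annotation, not part of the kernel source): summary
 * of the nowait decision made by __io_file_supports_nowait() above:
 *
 *   block device  - only if blk_queue_nowait() is true for its queue
 *   socket        - always supported
 *   regular file  - backing bdev must support nowait and the file must not
 *	be an io_uring instance itself
 *   anything else - O_NONBLOCK set, or FMODE_NOWAIT plus a ->read_iter()
 *	(for reads) / ->write_iter() (for writes)
 *
 * REQ_F_NOWAIT_READ/WRITE let io_file_supports_nowait() skip this check
 * when the answer is already recorded on the request.
 */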
2675
b191e2df 2676static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
7b29f92d 2677{
b191e2df 2678 if (rw == READ && (req->flags & REQ_F_NOWAIT_READ))
7b29f92d 2679 return true;
b191e2df 2680 else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE))
7b29f92d
JA
2681 return true;
2682
b191e2df 2683 return __io_file_supports_nowait(req->file, rw);
7b29f92d
JA
2684}
2685
a88fc400 2686static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2b188cc1 2687{
def596e9 2688 struct io_ring_ctx *ctx = req->ctx;
9adbd45d 2689 struct kiocb *kiocb = &req->rw.kiocb;
75c668cd 2690 struct file *file = req->file;
09bb8394
JA
2691 unsigned ioprio;
2692 int ret;
2b188cc1 2693
c97d8a0f 2694 if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode))
491381ce
JA
2695 req->flags |= REQ_F_ISREG;
2696
2b188cc1 2697 kiocb->ki_pos = READ_ONCE(sqe->off);
75c668cd 2698 if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
ba04291e 2699 req->flags |= REQ_F_CUR_POS;
75c668cd 2700 kiocb->ki_pos = file->f_pos;
ba04291e 2701 }
2b188cc1 2702 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
3e577dcd
PB
2703 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2704 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2705 if (unlikely(ret))
2706 return ret;
2b188cc1 2707
75c668cd
PB
2708 /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
2709 if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
2710 req->flags |= REQ_F_NOWAIT;
2711
2b188cc1
JA
2712 ioprio = READ_ONCE(sqe->ioprio);
2713 if (ioprio) {
2714 ret = ioprio_check_cap(ioprio);
2715 if (ret)
09bb8394 2716 return ret;
2b188cc1
JA
2717
2718 kiocb->ki_ioprio = ioprio;
2719 } else
2720 kiocb->ki_ioprio = get_current_ioprio();
2721
def596e9 2722 if (ctx->flags & IORING_SETUP_IOPOLL) {
def596e9
JA
2723 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2724 !kiocb->ki_filp->f_op->iopoll)
09bb8394 2725 return -EOPNOTSUPP;
2b188cc1 2726
def596e9
JA
2727 kiocb->ki_flags |= IOCB_HIPRI;
2728 kiocb->ki_complete = io_complete_rw_iopoll;
65a6543d 2729 req->iopoll_completed = 0;
def596e9 2730 } else {
09bb8394
JA
2731 if (kiocb->ki_flags & IOCB_HIPRI)
2732 return -EINVAL;
def596e9
JA
2733 kiocb->ki_complete = io_complete_rw;
2734 }
9adbd45d 2735
eae071c9
PB
2736 if (req->opcode == IORING_OP_READ_FIXED ||
2737 req->opcode == IORING_OP_WRITE_FIXED) {
2738 req->imu = NULL;
2739 io_req_set_rsrc_node(req);
2740 }
2741
3529d8c2
JA
2742 req->rw.addr = READ_ONCE(sqe->addr);
2743 req->rw.len = READ_ONCE(sqe->len);
4f4eeba8 2744 req->buf_index = READ_ONCE(sqe->buf_index);
2b188cc1 2745 return 0;
2b188cc1
JA
2746}
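/*
 * Illustrative sketch (annotation, not part of the kernel source): the
 * SQE -> kiocb mapping performed by io_prep_rw() above:
 *
 *   sqe->off       -> kiocb->ki_pos (-1 on a non-stream file means "use
 *	file->f_pos", recorded via REQ_F_CUR_POS and written back later)
 *   sqe->rw_flags  -> kiocb->ki_flags via kiocb_set_rw_flags()
 *   sqe->ioprio    -> kiocb->ki_ioprio (checked with ioprio_check_cap())
 *   sqe->addr/len  -> req->rw.addr / req->rw.len
 *   sqe->buf_index -> req->buf_index (fixed or provided buffers)
 *
 * Under IORING_SETUP_IOPOLL the request must be IOCB_DIRECT on a file that
 * implements ->iopoll(); completion then goes through
 * io_complete_rw_iopoll() rather than io_complete_rw().
 */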
2747
2748static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2749{
2750 switch (ret) {
2751 case -EIOCBQUEUED:
2752 break;
2753 case -ERESTARTSYS:
2754 case -ERESTARTNOINTR:
2755 case -ERESTARTNOHAND:
2756 case -ERESTART_RESTARTBLOCK:
2757 /*
2758 * We can't just restart the syscall, since previously
2759 * submitted sqes may already be in progress. Just fail this
2760 * IO with EINTR.
2761 */
2762 ret = -EINTR;
df561f66 2763 fallthrough;
2b188cc1
JA
2764 default:
2765 kiocb->ki_complete(kiocb, ret, 0);
2766 }
2767}
2768
a1d7c393 2769static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
889fca73 2770 unsigned int issue_flags)
ba816ad6 2771{
ba04291e 2772 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
e8c2bc1f 2773 struct io_async_rw *io = req->async_data;
97284637 2774 bool check_reissue = kiocb->ki_complete == io_complete_rw;
ba04291e 2775
227c0c96 2776 /* add previously done IO, if any */
e8c2bc1f 2777 if (io && io->bytes_done > 0) {
227c0c96 2778 if (ret < 0)
e8c2bc1f 2779 ret = io->bytes_done;
227c0c96 2780 else
e8c2bc1f 2781 ret += io->bytes_done;
227c0c96
JA
2782 }
2783
ba04291e
JA
2784 if (req->flags & REQ_F_CUR_POS)
2785 req->file->f_pos = kiocb->ki_pos;
e149bd74 2786 if (ret >= 0 && check_reissue)
889fca73 2787 __io_complete_rw(req, ret, 0, issue_flags);
ba816ad6
JA
2788 else
2789 io_rw_done(kiocb, ret);
97284637 2790
fe7e3257 2791 if (check_reissue && (req->flags & REQ_F_REISSUE)) {
97284637 2792 req->flags &= ~REQ_F_REISSUE;
a7be7c23 2793 if (io_resubmit_prep(req)) {
8c130827 2794 req_ref_get(req);
773af691 2795 io_req_task_queue_reissue(req);
8c130827 2796 } else {
97284637
PB
2797 int cflags = 0;
2798
93d2bcd2 2799 req_set_fail(req);
97284637
PB
2800 if (req->flags & REQ_F_BUFFER_SELECTED)
2801 cflags = io_put_rw_kbuf(req);
2802 __io_req_complete(req, issue_flags, ret, cflags);
2803 }
2804 }
ba816ad6
JA
2805}
2806
eae071c9
PB
2807static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
2808 struct io_mapped_ubuf *imu)
edafccee 2809{
9adbd45d 2810 size_t len = req->rw.len;
75769e3f 2811 u64 buf_end, buf_addr = req->rw.addr;
edafccee 2812 size_t offset;
edafccee 2813
75769e3f 2814 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
edafccee
JA
2815 return -EFAULT;
2816 /* not inside the mapped region */
4751f53d 2817 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
edafccee
JA
2818 return -EFAULT;
2819
2820 /*
2821 * May not be a start of buffer, set size appropriately
2822 * and advance us to the beginning.
2823 */
2824 offset = buf_addr - imu->ubuf;
2825 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bd11b3a3
JA
2826
2827 if (offset) {
2828 /*
2829 * Don't use iov_iter_advance() here, as it's really slow for
2830 * using the latter parts of a big fixed buffer - it iterates
2831 * over each segment manually. We can cheat a bit here, because
2832 * we know that:
2833 *
2834 * 1) it's a BVEC iter, we set it up
2835 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2836 * first and last bvec
2837 *
2838 * So just find our index, and adjust the iterator afterwards.
2839 * If the offset is within the first bvec (or the whole first
2840 * bvec, just use iov_iter_advance(). This makes it easier
2841 * since we can just skip the first segment, which may not
2842 * be PAGE_SIZE aligned.
2843 */
2844 const struct bio_vec *bvec = imu->bvec;
2845
2846 if (offset <= bvec->bv_len) {
2847 iov_iter_advance(iter, offset);
2848 } else {
2849 unsigned long seg_skip;
2850
2851 /* skip first vec */
2852 offset -= bvec->bv_len;
2853 seg_skip = 1 + (offset >> PAGE_SHIFT);
2854
2855 iter->bvec = bvec + seg_skip;
2856 iter->nr_segs -= seg_skip;
99c79f66 2857 iter->count -= bvec->bv_len + offset;
bd11b3a3 2858 iter->iov_offset = offset & ~PAGE_MASK;
bd11b3a3
JA
2859 }
2860 }
2861
847595de 2862 return 0;
edafccee
JA
2863}
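/*
 * Illustrative worked example (annotation, not part of the kernel source)
 * of the segment-skip math above, assuming every bvec, including the
 * first, is exactly PAGE_SIZE (4096) bytes:
 *
 *	offset = buf_addr - imu->ubuf = 12388;	// 3 pages + 100 bytes
 *	// offset > bvec[0].bv_len, so:
 *	offset  -= 4096;			// 8292
 *	seg_skip = 1 + (8292 >> PAGE_SHIFT);	// 3 segments skipped
 *	iter->bvec += 3;  iter->nr_segs -= 3;
 *	iter->count -= 4096 + 8292;		// the original 12388
 *	iter->iov_offset = 8292 & ~PAGE_MASK;	// 100
 *
 * The iterator lands 100 bytes into the fourth bvec, i.e. byte 12388 of
 * the registered buffer, without walking the skipped segments one by one.
 */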
2864
eae071c9
PB
2865static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
2866{
2867 struct io_ring_ctx *ctx = req->ctx;
2868 struct io_mapped_ubuf *imu = req->imu;
2869 u16 index, buf_index = req->buf_index;
2870
2871 if (likely(!imu)) {
2872 if (unlikely(buf_index >= ctx->nr_user_bufs))
2873 return -EFAULT;
2874 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2875 imu = READ_ONCE(ctx->user_bufs[index]);
2876 req->imu = imu;
2877 }
2878 return __io_import_fixed(req, rw, iter, imu);
2879}
2880
bcda7baa
JA
2881static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2882{
2883 if (needs_lock)
2884 mutex_unlock(&ctx->uring_lock);
2885}
2886
2887static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2888{
2889 /*
2890 * "Normal" inline submissions always hold the uring_lock, since we
2891 * grab it from the system call. Same is true for the SQPOLL offload.
2892 * The only exception is when we've detached the request and issue it
2893 * from an async worker thread, grab the lock for that case.
2894 */
2895 if (needs_lock)
2896 mutex_lock(&ctx->uring_lock);
2897}
2898
2899static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2900 int bgid, struct io_buffer *kbuf,
2901 bool needs_lock)
2902{
2903 struct io_buffer *head;
2904
2905 if (req->flags & REQ_F_BUFFER_SELECTED)
2906 return kbuf;
2907
2908 io_ring_submit_lock(req->ctx, needs_lock);
2909
2910 lockdep_assert_held(&req->ctx->uring_lock);
2911
9e15c3a0 2912 head = xa_load(&req->ctx->io_buffers, bgid);
bcda7baa
JA
2913 if (head) {
2914 if (!list_empty(&head->list)) {
2915 kbuf = list_last_entry(&head->list, struct io_buffer,
2916 list);
2917 list_del(&kbuf->list);
2918 } else {
2919 kbuf = head;
9e15c3a0 2920 xa_erase(&req->ctx->io_buffers, bgid);
bcda7baa
JA
2921 }
2922 if (*len > kbuf->len)
2923 *len = kbuf->len;
2924 } else {
2925 kbuf = ERR_PTR(-ENOBUFS);
2926 }
2927
2928 io_ring_submit_unlock(req->ctx, needs_lock);
2929
2930 return kbuf;
2931}
2932
4d954c25
JA
2933static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2934 bool needs_lock)
2935{
2936 struct io_buffer *kbuf;
4f4eeba8 2937 u16 bgid;
4d954c25
JA
2938
2939 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
4f4eeba8 2940 bgid = req->buf_index;
4d954c25
JA
2941 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2942 if (IS_ERR(kbuf))
2943 return kbuf;
2944 req->rw.addr = (u64) (unsigned long) kbuf;
2945 req->flags |= REQ_F_BUFFER_SELECTED;
2946 return u64_to_user_ptr(kbuf->addr);
2947}
2948
2949#ifdef CONFIG_COMPAT
2950static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2951 bool needs_lock)
2952{
2953 struct compat_iovec __user *uiov;
2954 compat_ssize_t clen;
2955 void __user *buf;
2956 ssize_t len;
2957
2958 uiov = u64_to_user_ptr(req->rw.addr);
2959 if (!access_ok(uiov, sizeof(*uiov)))
2960 return -EFAULT;
2961 if (__get_user(clen, &uiov->iov_len))
2962 return -EFAULT;
2963 if (clen < 0)
2964 return -EINVAL;
2965
2966 len = clen;
2967 buf = io_rw_buffer_select(req, &len, needs_lock);
2968 if (IS_ERR(buf))
2969 return PTR_ERR(buf);
2970 iov[0].iov_base = buf;
2971 iov[0].iov_len = (compat_size_t) len;
2972 return 0;
2973}
2974#endif
2975
2976static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2977 bool needs_lock)
2978{
2979 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2980 void __user *buf;
2981 ssize_t len;
2982
2983 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2984 return -EFAULT;
2985
2986 len = iov[0].iov_len;
2987 if (len < 0)
2988 return -EINVAL;
2989 buf = io_rw_buffer_select(req, &len, needs_lock);
2990 if (IS_ERR(buf))
2991 return PTR_ERR(buf);
2992 iov[0].iov_base = buf;
2993 iov[0].iov_len = len;
2994 return 0;
2995}
2996
2997static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2998 bool needs_lock)
2999{
dddb3e26
JA
3000 if (req->flags & REQ_F_BUFFER_SELECTED) {
3001 struct io_buffer *kbuf;
3002
3003 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3004 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3005 iov[0].iov_len = kbuf->len;
4d954c25 3006 return 0;
dddb3e26 3007 }
dd201662 3008 if (req->rw.len != 1)
4d954c25
JA
3009 return -EINVAL;
3010
3011#ifdef CONFIG_COMPAT
3012 if (req->ctx->compat)
3013 return io_compat_import(req, iov, needs_lock);
3014#endif
3015
3016 return __io_iov_buffer_select(req, iov, needs_lock);
3017}
3018
847595de
PB
3019static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3020 struct iov_iter *iter, bool needs_lock)
2b188cc1 3021{
9adbd45d
JA
3022 void __user *buf = u64_to_user_ptr(req->rw.addr);
3023 size_t sqe_len = req->rw.len;
847595de 3024 u8 opcode = req->opcode;
4d954c25 3025 ssize_t ret;
edafccee 3026
7d009165 3027 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
edafccee 3028 *iovec = NULL;
9adbd45d 3029 return io_import_fixed(req, rw, iter);
edafccee 3030 }
2b188cc1 3031
bcda7baa 3032 /* buffer index only valid with fixed read/write, or buffer select */
4f4eeba8 3033 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
9adbd45d
JA
3034 return -EINVAL;
3035
3a6820f2 3036 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
bcda7baa 3037 if (req->flags & REQ_F_BUFFER_SELECT) {
4d954c25 3038 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
867a23ea 3039 if (IS_ERR(buf))
4d954c25 3040 return PTR_ERR(buf);
3f9d6441 3041 req->rw.len = sqe_len;
bcda7baa
JA
3042 }
3043
3a6820f2
JA
3044 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3045 *iovec = NULL;
10fc72e4 3046 return ret;
3a6820f2
JA
3047 }
3048
4d954c25
JA
3049 if (req->flags & REQ_F_BUFFER_SELECT) {
3050 ret = io_iov_buffer_select(req, *iovec, needs_lock);
847595de
PB
3051 if (!ret)
3052 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
4d954c25
JA
3053 *iovec = NULL;
3054 return ret;
3055 }
3056
89cd35c5
CH
3057 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3058 req->ctx->compat);
2b188cc1
JA
3059}
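/*
 * Illustrative sketch (annotation, not part of the kernel source):
 * io_import_iovec() above builds the iov_iter from one of three sources:
 *
 *   IORING_OP_READ_FIXED/WRITE_FIXED - registered buffer, io_import_fixed()
 *   IORING_OP_READ/WRITE             - single address/length via
 *	import_single_range(), optionally swapped for a provided buffer
 *	when REQ_F_BUFFER_SELECT is set
 *   vectored read/write opcodes      - full iovec import via
 *	__import_iovec() (or a single selected buffer)
 *
 * Paths that don't allocate an iovec set *iovec to NULL so the caller
 * knows there is nothing to kfree() afterwards.
 */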
3060
0fef9483
JA
3061static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3062{
5b09e37e 3063 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
0fef9483
JA
3064}
3065
31b51510 3066/*
32960613
JA
3067 * For files that don't have ->read_iter() and ->write_iter(), handle them
3068 * by looping over ->read() or ->write() manually.
31b51510 3069 */
4017eb91 3070static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
32960613 3071{
4017eb91
JA
3072 struct kiocb *kiocb = &req->rw.kiocb;
3073 struct file *file = req->file;
32960613
JA
3074 ssize_t ret = 0;
3075
3076 /*
3077 * Don't support polled IO through this interface, and we can't
3078 * support non-blocking either. For the latter, this just causes
3079 * the kiocb to be handled from an async context.
3080 */
3081 if (kiocb->ki_flags & IOCB_HIPRI)
3082 return -EOPNOTSUPP;
3083 if (kiocb->ki_flags & IOCB_NOWAIT)
3084 return -EAGAIN;
3085
3086 while (iov_iter_count(iter)) {
311ae9e1 3087 struct iovec iovec;
32960613
JA
3088 ssize_t nr;
3089
311ae9e1
PB
3090 if (!iov_iter_is_bvec(iter)) {
3091 iovec = iov_iter_iovec(iter);
3092 } else {
4017eb91
JA
3093 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3094 iovec.iov_len = req->rw.len;
311ae9e1
PB
3095 }
3096
32960613
JA
3097 if (rw == READ) {
3098 nr = file->f_op->read(file, iovec.iov_base,
0fef9483 3099 iovec.iov_len, io_kiocb_ppos(kiocb));
32960613
JA
3100 } else {
3101 nr = file->f_op->write(file, iovec.iov_base,
0fef9483 3102 iovec.iov_len, io_kiocb_ppos(kiocb));
32960613
JA
3103 }
3104
3105 if (nr < 0) {
3106 if (!ret)
3107 ret = nr;
3108 break;
3109 }
3110 ret += nr;
3111 if (nr != iovec.iov_len)
3112 break;
4017eb91
JA
3113 req->rw.len -= nr;
3114 req->rw.addr += nr;
32960613
JA
3115 iov_iter_advance(iter, nr);
3116 }
3117
3118 return ret;
3119}
3120
ff6165b2
JA
3121static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3122 const struct iovec *fast_iov, struct iov_iter *iter)
f67676d1 3123{
e8c2bc1f 3124 struct io_async_rw *rw = req->async_data;
b64e3444 3125
ff6165b2 3126 memcpy(&rw->iter, iter, sizeof(*iter));
afb87658 3127 rw->free_iovec = iovec;
227c0c96 3128 rw->bytes_done = 0;
ff6165b2 3129 /* can only be fixed buffers, no need to do anything */
9c3a205c 3130 if (iov_iter_is_bvec(iter))
ff6165b2 3131 return;
b64e3444 3132 if (!iovec) {
ff6165b2
JA
3133 unsigned iov_off = 0;
3134
3135 rw->iter.iov = rw->fast_iov;
3136 if (iter->iov != fast_iov) {
3137 iov_off = iter->iov - fast_iov;
3138 rw->iter.iov += iov_off;
3139 }
3140 if (rw->fast_iov != fast_iov)
3141 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
45097dae 3142 sizeof(struct iovec) * iter->nr_segs);
99bc4c38
PB
3143 } else {
3144 req->flags |= REQ_F_NEED_CLEANUP;
f67676d1
JA
3145 }
3146}
3147
6cb78689 3148static inline int io_alloc_async_data(struct io_kiocb *req)
3d9932a8 3149{
e8c2bc1f
JA
3150 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3151 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3152 return req->async_data == NULL;
3d9932a8
XW
3153}
3154
ff6165b2
JA
3155static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3156 const struct iovec *fast_iov,
227c0c96 3157 struct iov_iter *iter, bool force)
b7bb4f7d 3158{
26f0505a 3159 if (!force && !io_op_defs[req->opcode].needs_async_setup)
74566df3 3160 return 0;
e8c2bc1f 3161 if (!req->async_data) {
6cb78689 3162 if (io_alloc_async_data(req)) {
6bf985dc 3163 kfree(iovec);
5d204bcf 3164 return -ENOMEM;
6bf985dc 3165 }
b7bb4f7d 3166
ff6165b2 3167 io_req_map_rw(req, iovec, fast_iov, iter);
5d204bcf 3168 }
b7bb4f7d 3169 return 0;
f67676d1
JA
3170}
3171
73debe68 3172static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
c3e330a4 3173{
e8c2bc1f 3174 struct io_async_rw *iorw = req->async_data;
f4bff104 3175 struct iovec *iov = iorw->fast_iov;
847595de 3176 int ret;
c3e330a4 3177
2846c481 3178 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
c3e330a4
PB
3179 if (unlikely(ret < 0))
3180 return ret;
3181
ab0b196c
PB
3182 iorw->bytes_done = 0;
3183 iorw->free_iovec = iov;
3184 if (iov)
3185 req->flags |= REQ_F_NEED_CLEANUP;
c3e330a4
PB
3186 return 0;
3187}
3188
73debe68 3189static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1 3190{
3529d8c2
JA
3191 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3192 return -EBADF;
93642ef8 3193 return io_prep_rw(req, sqe);
f67676d1
JA
3194}
3195
c1dd91d1
JA
3196/*
3197 * This is our waitqueue callback handler, registered through lock_page_async()
3198 * when we initially tried to do the IO with the iocb armed our waitqueue.
3199 * This gets called when the page is unlocked, and we generally expect that to
3200 * happen when the page IO is completed and the page is now uptodate. This will
3201 * queue a task_work based retry of the operation, attempting to copy the data
3202 * again. If the latter fails because the page was NOT uptodate, then we will
3203 * do a thread based blocking retry of the operation. That's the unexpected
3204 * slow path.
3205 */
bcf5a063
JA
3206static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3207 int sync, void *arg)
3208{
3209 struct wait_page_queue *wpq;
3210 struct io_kiocb *req = wait->private;
bcf5a063 3211 struct wait_page_key *key = arg;
bcf5a063
JA
3212
3213 wpq = container_of(wait, struct wait_page_queue, wait);
3214
cdc8fcb4
LT
3215 if (!wake_page_match(wpq, key))
3216 return 0;
3217
c8d317aa 3218 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
bcf5a063
JA
3219 list_del_init(&wait->entry);
3220
bcf5a063 3221 /* submit ref gets dropped, acquire a new one */
de9b4cca 3222 req_ref_get(req);
921b9054 3223 io_req_task_queue(req);
bcf5a063
JA
3224 return 1;
3225}
3226
c1dd91d1
JA
3227/*
3228 * This controls whether a given IO request should be armed for async page
3229 * based retry. If we return false here, the request is handed to the async
3230 * worker threads for retry. If we're doing buffered reads on a regular file,
3231 * we prepare a private wait_page_queue entry and retry the operation. This
3232 * will either succeed because the page is now uptodate and unlocked, or it
3233 * will register a callback when the page is unlocked at IO completion. Through
3234 * that callback, io_uring uses task_work to setup a retry of the operation.
3235 * That retry will attempt the buffered read again. The retry will generally
3236 * succeed, or in rare cases where it fails, we then fall back to using the
3237 * async worker threads for a blocking retry.
3238 */
227c0c96 3239static bool io_rw_should_retry(struct io_kiocb *req)
f67676d1 3240{
e8c2bc1f
JA
3241 struct io_async_rw *rw = req->async_data;
3242 struct wait_page_queue *wait = &rw->wpq;
bcf5a063 3243 struct kiocb *kiocb = &req->rw.kiocb;
f67676d1 3244
bcf5a063
JA
3245 /* never retry for NOWAIT, we just complete with -EAGAIN */
3246 if (req->flags & REQ_F_NOWAIT)
3247 return false;
f67676d1 3248
227c0c96 3249 /* Only for buffered IO */
3b2a4439 3250 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
bcf5a063 3251 return false;
3b2a4439 3252
bcf5a063
JA
3253 /*
3254 * just use poll if we can, and don't attempt if the fs doesn't
3255 * support callback based unlocks
3256 */
3257 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3258 return false;
f67676d1 3259
3b2a4439
JA
3260 wait->wait.func = io_async_buf_func;
3261 wait->wait.private = req;
3262 wait->wait.flags = 0;
3263 INIT_LIST_HEAD(&wait->wait.entry);
3264 kiocb->ki_flags |= IOCB_WAITQ;
c8d317aa 3265 kiocb->ki_flags &= ~IOCB_NOWAIT;
3b2a4439 3266 kiocb->ki_waitq = wait;
3b2a4439 3267 return true;
bcf5a063
JA
3268}
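/*
 * Illustrative sketch (annotation, not part of the kernel source): the
 * buffered-read retry machinery wired up above, pieced together from
 * io_rw_should_retry(), io_async_buf_func() and io_read():
 *
 *	kiocb->ki_flags |= IOCB_WAITQ;		// arm page-unlock callback
 *	ret = io_iter_do_read(req, iter);
 *	if (ret == -EIOCBQUEUED)
 *		return 0;	// io_async_buf_func() will io_req_task_queue()
 *				// the request when the page unlocks, and the
 *				// read is retried from task_work context
 *
 * Only buffered (non-DIRECT, non-HIPRI) reads on files that aren't
 * pollable but do have FMODE_BUF_RASYNC take this path; everything else
 * falls back to the async worker threads.
 */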
3269
aeab9506 3270static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
bcf5a063
JA
3271{
3272 if (req->file->f_op->read_iter)
3273 return call_read_iter(req->file, &req->rw.kiocb, iter);
2dd2111d 3274 else if (req->file->f_op->read)
4017eb91 3275 return loop_rw_iter(READ, req, iter);
2dd2111d
GH
3276 else
3277 return -EINVAL;
f67676d1
JA
3278}
3279
889fca73 3280static int io_read(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1
JA
3281{
3282 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 3283 struct kiocb *kiocb = &req->rw.kiocb;
ff6165b2 3284 struct iov_iter __iter, *iter = &__iter;
e8c2bc1f 3285 struct io_async_rw *rw = req->async_data;
227c0c96 3286 ssize_t io_size, ret, ret2;
45d189c6 3287 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ff6165b2 3288
2846c481 3289 if (rw) {
e8c2bc1f 3290 iter = &rw->iter;
2846c481
PB
3291 iovec = NULL;
3292 } else {
3293 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3294 if (ret < 0)
3295 return ret;
3296 }
632546c4 3297 io_size = iov_iter_count(iter);
fa15bafb 3298 req->result = io_size;
2b188cc1 3299
fd6c2e4c
JA
3300 /* Ensure we clear previously set non-block flag */
3301 if (!force_nonblock)
29de5f6a 3302 kiocb->ki_flags &= ~IOCB_NOWAIT;
a88fc400
PB
3303 else
3304 kiocb->ki_flags |= IOCB_NOWAIT;
3305
24c74678 3306 /* If the file doesn't support async, just async punt */
b191e2df 3307 if (force_nonblock && !io_file_supports_nowait(req, READ)) {
6713e7a6 3308 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
6bf985dc 3309 return ret ?: -EAGAIN;
6713e7a6 3310 }
9e645e11 3311
632546c4 3312 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
5ea5dd45
PB
3313 if (unlikely(ret)) {
3314 kfree(iovec);
3315 return ret;
3316 }
2b188cc1 3317
227c0c96 3318 ret = io_iter_do_read(req, iter);
32960613 3319
230d50d4 3320 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
6ad7f233 3321 req->flags &= ~REQ_F_REISSUE;
eefdf30f
JA
3322 /* IOPOLL retry should happen for io-wq threads */
3323 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
f91daf56 3324 goto done;
75c668cd
PB
3325 /* no retry on NONBLOCK nor RWF_NOWAIT */
3326 if (req->flags & REQ_F_NOWAIT)
355afaeb 3327 goto done;
84216315 3328 /* some cases will consume bytes even on error returns */
632546c4 3329 iov_iter_revert(iter, io_size - iov_iter_count(iter));
f38c7e3a 3330 ret = 0;
230d50d4
JA
3331 } else if (ret == -EIOCBQUEUED) {
3332 goto out_free;
7335e3bf 3333 } else if (ret <= 0 || ret == io_size || !force_nonblock ||
75c668cd 3334 (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
7335e3bf 3335 /* read all, failed, already did sync or don't want to retry */
00d23d51 3336 goto done;
227c0c96
JA
3337 }
3338
227c0c96 3339 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
6bf985dc
PB
3340 if (ret2)
3341 return ret2;
3342
fe1cdd55 3343 iovec = NULL;
e8c2bc1f 3344 rw = req->async_data;
227c0c96 3345 /* now use our persistent iterator, if we aren't already */
e8c2bc1f 3346 iter = &rw->iter;
227c0c96 3347
b23df91b
PB
3348 do {
3349 io_size -= ret;
3350 rw->bytes_done += ret;
3351 /* if we can retry, do so with the callbacks armed */
3352 if (!io_rw_should_retry(req)) {
3353 kiocb->ki_flags &= ~IOCB_WAITQ;
3354 return -EAGAIN;
3355 }
3356
3357 /*
3358 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3359 * we get -EIOCBQUEUED, then we'll get a notification when the
3360 * desired page gets unlocked. We can also get a partial read
3361 * here, and if we do, then just retry at the new offset.
3362 */
3363 ret = io_iter_do_read(req, iter);
3364 if (ret == -EIOCBQUEUED)
3365 return 0;
227c0c96 3366 /* we got some bytes, but not all. retry. */
b5b0ecb7 3367 kiocb->ki_flags &= ~IOCB_WAITQ;
b23df91b 3368 } while (ret > 0 && ret < io_size);
227c0c96 3369done:
889fca73 3370 kiocb_done(kiocb, ret, issue_flags);
fe1cdd55
PB
3371out_free:
3372	/* it's faster to check here than to delegate to kfree */
3373 if (iovec)
3374 kfree(iovec);
5ea5dd45 3375 return 0;
2b188cc1
JA
3376}
3377
73debe68 3378static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1 3379{
3529d8c2
JA
3380 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3381 return -EBADF;
93642ef8 3382 return io_prep_rw(req, sqe);
f67676d1
JA
3383}
3384
889fca73 3385static int io_write(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1
JA
3386{
3387 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 3388 struct kiocb *kiocb = &req->rw.kiocb;
ff6165b2 3389 struct iov_iter __iter, *iter = &__iter;
e8c2bc1f 3390 struct io_async_rw *rw = req->async_data;
fa15bafb 3391 ssize_t ret, ret2, io_size;
45d189c6 3392 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
2b188cc1 3393
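 /*
  * As in io_read(): reuse the iterator saved in async data by an earlier
  * attempt, if any, rather than re-importing the user iovec.
  */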
2846c481 3394 if (rw) {
e8c2bc1f 3395 iter = &rw->iter;
2846c481
PB
3396 iovec = NULL;
3397 } else {
3398 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3399 if (ret < 0)
3400 return ret;
3401 }
632546c4 3402 io_size = iov_iter_count(iter);
fa15bafb 3403 req->result = io_size;
2b188cc1 3404
fd6c2e4c
JA
3405 /* Ensure we clear previously set non-block flag */
3406 if (!force_nonblock)
a88fc400
PB
3407 kiocb->ki_flags &= ~IOCB_NOWAIT;
3408 else
3409 kiocb->ki_flags |= IOCB_NOWAIT;
fd6c2e4c 3410
24c74678 3411 /* If the file doesn't support async, just async punt */
b191e2df 3412 if (force_nonblock && !io_file_supports_nowait(req, WRITE))
f67676d1 3413 goto copy_iov;
31b51510 3414
10d59345
JA
3415	/* buffered (non-direct) IO to a regular file can't honor NOWAIT, so punt */
3416 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3417 (req->flags & REQ_F_ISREG))
f67676d1 3418 goto copy_iov;
31b51510 3419
632546c4 3420 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
fa15bafb
PB
3421 if (unlikely(ret))
3422 goto out_free;
4ed734b0 3423
fa15bafb
PB
3424 /*
3425 * Open-code file_start_write here to grab freeze protection,
3426 * which will be released by another thread in
3427 * io_complete_rw(). Fool lockdep by telling it the lock got
3428 * released so that it doesn't complain about the held lock when
3429 * we return to userspace.
3430 */
3431 if (req->flags & REQ_F_ISREG) {
8a3c84b6 3432 sb_start_write(file_inode(req->file)->i_sb);
fa15bafb
PB
3433 __sb_writers_release(file_inode(req->file)->i_sb,
3434 SB_FREEZE_WRITE);
3435 }
3436 kiocb->ki_flags |= IOCB_WRITE;
4ed734b0 3437
fa15bafb 3438 if (req->file->f_op->write_iter)
ff6165b2 3439 ret2 = call_write_iter(req->file, kiocb, iter);
2dd2111d 3440 else if (req->file->f_op->write)
4017eb91 3441 ret2 = loop_rw_iter(WRITE, req, iter);
2dd2111d
GH
3442 else
3443 ret2 = -EINVAL;
4ed734b0 3444
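 /*
  * REQ_F_REISSUE is set when the lower layer returned -EAGAIN for a request
  * that is safe to re-submit; turn it back into -EAGAIN so the copy_iov
  * punt below handles the retry.
  */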
6ad7f233
PB
3445 if (req->flags & REQ_F_REISSUE) {
3446 req->flags &= ~REQ_F_REISSUE;
230d50d4 3447 ret2 = -EAGAIN;
6ad7f233 3448 }
230d50d4 3449
fa15bafb
PB
3450 /*
3451 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3452 * retry them without IOCB_NOWAIT.
3453 */
3454 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3455 ret2 = -EAGAIN;
75c668cd
PB
3456 /* no retry on NONBLOCK nor RWF_NOWAIT */
3457 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
355afaeb 3458 goto done;
fa15bafb 3459 if (!force_nonblock || ret2 != -EAGAIN) {
eefdf30f
JA
3460 /* IOPOLL retry should happen for io-wq threads */
3461 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3462 goto copy_iov;
355afaeb 3463done:
889fca73 3464 kiocb_done(kiocb, ret2, issue_flags);
fa15bafb 3465 } else {
f67676d1 3466copy_iov:
84216315 3467 /* some cases will consume bytes even on error returns */
632546c4 3468 iov_iter_revert(iter, io_size - iov_iter_count(iter));
227c0c96 3469 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
6bf985dc 3470 return ret ?: -EAGAIN;
2b188cc1 3471 }
31b51510 3472out_free:
f261c168 3473 /* it's reportedly faster than delegating the null check to kfree() */
252917c3 3474 if (iovec)
6f2cc166 3475 kfree(iovec);
2b188cc1
JA
3476 return ret;
3477}
3478
80a261fd
JA
3479static int io_renameat_prep(struct io_kiocb *req,
3480 const struct io_uring_sqe *sqe)
3481{
3482 struct io_rename *ren = &req->rename;
3483 const char __user *oldf, *newf;
3484
ed7eb259
JA
3485 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3486 return -EINVAL;
3487 if (sqe->ioprio || sqe->buf_index)
3488 return -EINVAL;
80a261fd
JA
3489 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3490 return -EBADF;
3491
3492 ren->old_dfd = READ_ONCE(sqe->fd);
3493 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3494 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3495 ren->new_dfd = READ_ONCE(sqe->len);
3496 ren->flags = READ_ONCE(sqe->rename_flags);
3497
3498 ren->oldpath = getname(oldf);
3499 if (IS_ERR(ren->oldpath))
3500 return PTR_ERR(ren->oldpath);
3501
3502 ren->newpath = getname(newf);
3503 if (IS_ERR(ren->newpath)) {
3504 putname(ren->oldpath);
3505 return PTR_ERR(ren->newpath);
3506 }
3507
3508 req->flags |= REQ_F_NEED_CLEANUP;
3509 return 0;
3510}
3511
45d189c6 3512static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
80a261fd
JA
3513{
3514 struct io_rename *ren = &req->rename;
3515 int ret;
3516
45d189c6 3517 if (issue_flags & IO_URING_F_NONBLOCK)
80a261fd
JA
3518 return -EAGAIN;
3519
3520 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3521 ren->newpath, ren->flags);
3522
3523 req->flags &= ~REQ_F_NEED_CLEANUP;
3524 if (ret < 0)
93d2bcd2 3525 req_set_fail(req);
80a261fd
JA
3526 io_req_complete(req, ret);
3527 return 0;
3528}
3529
14a1143b
JA
3530static int io_unlinkat_prep(struct io_kiocb *req,
3531 const struct io_uring_sqe *sqe)
3532{
3533 struct io_unlink *un = &req->unlink;
3534 const char __user *fname;
3535
22634bc5
JA
3536 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3537 return -EINVAL;
3538 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
3539 return -EINVAL;
14a1143b
JA
3540 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3541 return -EBADF;
3542
3543 un->dfd = READ_ONCE(sqe->fd);
3544
3545 un->flags = READ_ONCE(sqe->unlink_flags);
3546 if (un->flags & ~AT_REMOVEDIR)
3547 return -EINVAL;
3548
3549 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3550 un->filename = getname(fname);
3551 if (IS_ERR(un->filename))
3552 return PTR_ERR(un->filename);
3553
3554 req->flags |= REQ_F_NEED_CLEANUP;
3555 return 0;
3556}
3557
45d189c6 3558static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
14a1143b
JA
3559{
3560 struct io_unlink *un = &req->unlink;
3561 int ret;
3562
45d189c6 3563 if (issue_flags & IO_URING_F_NONBLOCK)
14a1143b
JA
3564 return -EAGAIN;
3565
3566 if (un->flags & AT_REMOVEDIR)
3567 ret = do_rmdir(un->dfd, un->filename);
3568 else
3569 ret = do_unlinkat(un->dfd, un->filename);
3570
3571 req->flags &= ~REQ_F_NEED_CLEANUP;
3572 if (ret < 0)
93d2bcd2 3573 req_set_fail(req);
14a1143b
JA
3574 io_req_complete(req, ret);
3575 return 0;
3576}
3577
36f4fa68
JA
3578static int io_shutdown_prep(struct io_kiocb *req,
3579 const struct io_uring_sqe *sqe)
3580{
3581#if defined(CONFIG_NET)
3582 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3583 return -EINVAL;
3584 if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3585 sqe->buf_index)
3586 return -EINVAL;
3587
3588 req->shutdown.how = READ_ONCE(sqe->len);
3589 return 0;
3590#else
3591 return -EOPNOTSUPP;
3592#endif
3593}
3594
45d189c6 3595static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
36f4fa68
JA
3596{
3597#if defined(CONFIG_NET)
3598 struct socket *sock;
3599 int ret;
3600
45d189c6 3601 if (issue_flags & IO_URING_F_NONBLOCK)
36f4fa68
JA
3602 return -EAGAIN;
3603
48aba79b 3604 sock = sock_from_file(req->file);
36f4fa68 3605 if (unlikely(!sock))
48aba79b 3606 return -ENOTSOCK;
36f4fa68
JA
3607
3608 ret = __sys_shutdown_sock(sock, req->shutdown.how);
a146468d 3609 if (ret < 0)
93d2bcd2 3610 req_set_fail(req);
36f4fa68
JA
3611 io_req_complete(req, ret);
3612 return 0;
3613#else
3614 return -EOPNOTSUPP;
3615#endif
3616}
3617
f2a8d5c7
PB
3618static int __io_splice_prep(struct io_kiocb *req,
3619 const struct io_uring_sqe *sqe)
7d67af2c 3620{
fe7e3257 3621 struct io_splice *sp = &req->splice;
7d67af2c 3622 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
7d67af2c 3623
3232dd02
PB
3624 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3625 return -EINVAL;
7d67af2c
PB
3626
3627 sp->file_in = NULL;
7d67af2c
PB
3628 sp->len = READ_ONCE(sqe->len);
3629 sp->flags = READ_ONCE(sqe->splice_flags);
3630
3631 if (unlikely(sp->flags & ~valid_flags))
3632 return -EINVAL;
3633
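 /*
  * Resolve the input file at prep time, from the registered (fixed) file
  * table when SPLICE_F_FD_IN_FIXED is set or from the fd table otherwise;
  * the reference is dropped when the request runs or is cleaned up.
  */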
ac177053
PB
3634 sp->file_in = io_file_get(req->ctx, NULL, req,
3635 READ_ONCE(sqe->splice_fd_in),
8371adf5
PB
3636 (sp->flags & SPLICE_F_FD_IN_FIXED));
3637 if (!sp->file_in)
3638 return -EBADF;
7d67af2c 3639 req->flags |= REQ_F_NEED_CLEANUP;
7d67af2c
PB
3640 return 0;
3641}
3642
f2a8d5c7
PB
3643static int io_tee_prep(struct io_kiocb *req,
3644 const struct io_uring_sqe *sqe)
3645{
3646 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3647 return -EINVAL;
3648 return __io_splice_prep(req, sqe);
3649}
3650
45d189c6 3651static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
f2a8d5c7
PB
3652{
3653 struct io_splice *sp = &req->splice;
3654 struct file *in = sp->file_in;
3655 struct file *out = sp->file_out;
3656 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3657 long ret = 0;
3658
45d189c6 3659 if (issue_flags & IO_URING_F_NONBLOCK)
f2a8d5c7
PB
3660 return -EAGAIN;
3661 if (sp->len)
3662 ret = do_tee(in, out, sp->len, flags);
3663
e1d767f0
PB
3664 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3665 io_put_file(in);
f2a8d5c7
PB
3666 req->flags &= ~REQ_F_NEED_CLEANUP;
3667
f2a8d5c7 3668 if (ret != sp->len)
93d2bcd2 3669 req_set_fail(req);
e1e16097 3670 io_req_complete(req, ret);
f2a8d5c7
PB
3671 return 0;
3672}
3673
3674static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3675{
fe7e3257 3676 struct io_splice *sp = &req->splice;
f2a8d5c7
PB
3677
3678 sp->off_in = READ_ONCE(sqe->splice_off_in);
3679 sp->off_out = READ_ONCE(sqe->off);
3680 return __io_splice_prep(req, sqe);
3681}
3682
45d189c6 3683static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
7d67af2c
PB
3684{
3685 struct io_splice *sp = &req->splice;
3686 struct file *in = sp->file_in;
3687 struct file *out = sp->file_out;
3688 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3689 loff_t *poff_in, *poff_out;
c9687426 3690 long ret = 0;
7d67af2c 3691
45d189c6 3692 if (issue_flags & IO_URING_F_NONBLOCK)
2fb3e822 3693 return -EAGAIN;
7d67af2c
PB
3694
3695 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3696 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
c9687426 3697
948a7749 3698 if (sp->len)
c9687426 3699 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
7d67af2c 3700
e1d767f0
PB
3701 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3702 io_put_file(in);
7d67af2c
PB
3703 req->flags &= ~REQ_F_NEED_CLEANUP;
3704
7d67af2c 3705 if (ret != sp->len)
93d2bcd2 3706 req_set_fail(req);
e1e16097 3707 io_req_complete(req, ret);
7d67af2c
PB
3708 return 0;
3709}
3710
2b188cc1
JA
3711/*
3712 * IORING_OP_NOP just posts a completion event, nothing else.
3713 */
889fca73 3714static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1
JA
3715{
3716 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 3717
def596e9
JA
3718 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3719 return -EINVAL;
3720
889fca73 3721 __io_req_complete(req, issue_flags, 0, 0);
2b188cc1
JA
3722 return 0;
3723}
3724
1155c76a 3725static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
c992fe29 3726{
6b06314c 3727 struct io_ring_ctx *ctx = req->ctx;
c992fe29 3728
09bb8394
JA
3729 if (!req->file)
3730 return -EBADF;
c992fe29 3731
6b06314c 3732 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
def596e9 3733 return -EINVAL;
edafccee 3734 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
c992fe29
CH
3735 return -EINVAL;
3736
8ed8d3c3
JA
3737 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3738 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3739 return -EINVAL;
3740
3741 req->sync.off = READ_ONCE(sqe->off);
3742 req->sync.len = READ_ONCE(sqe->len);
c992fe29
CH
3743 return 0;
3744}
3745
45d189c6 3746static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 3747{
8ed8d3c3 3748 loff_t end = req->sync.off + req->sync.len;
8ed8d3c3
JA
3749 int ret;
3750
ac45abc0 3751 /* fsync always requires a blocking context */
45d189c6 3752 if (issue_flags & IO_URING_F_NONBLOCK)
ac45abc0
PB
3753 return -EAGAIN;
3754
9adbd45d 3755 ret = vfs_fsync_range(req->file, req->sync.off,
8ed8d3c3
JA
3756 end > 0 ? end : LLONG_MAX,
3757 req->sync.flags & IORING_FSYNC_DATASYNC);
3758 if (ret < 0)
93d2bcd2 3759 req_set_fail(req);
e1e16097 3760 io_req_complete(req, ret);
c992fe29
CH
3761 return 0;
3762}
3763
d63d1b5e
JA
3764static int io_fallocate_prep(struct io_kiocb *req,
3765 const struct io_uring_sqe *sqe)
3766{
3767 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3768 return -EINVAL;
3232dd02
PB
3769 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3770 return -EINVAL;
d63d1b5e
JA
3771
3772 req->sync.off = READ_ONCE(sqe->off);
3773 req->sync.len = READ_ONCE(sqe->addr);
3774 req->sync.mode = READ_ONCE(sqe->len);
3775 return 0;
3776}
3777
45d189c6 3778static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
5d17b4a4 3779{
ac45abc0
PB
3780 int ret;
3781
d63d1b5e 3782 /* fallocate always requiring blocking context */
45d189c6 3783 if (issue_flags & IO_URING_F_NONBLOCK)
5d17b4a4 3784 return -EAGAIN;
ac45abc0
PB
3785 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3786 req->sync.len);
ac45abc0 3787 if (ret < 0)
93d2bcd2 3788 req_set_fail(req);
e1e16097 3789 io_req_complete(req, ret);
5d17b4a4
JA
3790 return 0;
3791}
3792
ec65fea5 3793static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
b7bb4f7d 3794{
f8748881 3795 const char __user *fname;
15b71abe 3796 int ret;
b7bb4f7d 3797
ec65fea5 3798 if (unlikely(sqe->ioprio || sqe->buf_index))
15b71abe 3799 return -EINVAL;
ec65fea5 3800 if (unlikely(req->flags & REQ_F_FIXED_FILE))
cf3040ca 3801 return -EBADF;
03b1230c 3802
ec65fea5
PB
3803 /* open.how should be already initialised */
3804 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
08a1d26e 3805 req->open.how.flags |= O_LARGEFILE;
3529d8c2 3806
25e72d10
PB
3807 req->open.dfd = READ_ONCE(sqe->fd);
3808 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
f8748881 3809 req->open.filename = getname(fname);
15b71abe
JA
3810 if (IS_ERR(req->open.filename)) {
3811 ret = PTR_ERR(req->open.filename);
3812 req->open.filename = NULL;
3813 return ret;
3814 }
4022e7af 3815 req->open.nofile = rlimit(RLIMIT_NOFILE);
8fef80bf 3816 req->flags |= REQ_F_NEED_CLEANUP;
15b71abe 3817 return 0;
03b1230c
JA
3818}
3819
ec65fea5
PB
3820static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3821{
3822 u64 flags, mode;
3823
14587a46 3824 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4eb8dded 3825 return -EINVAL;
ec65fea5
PB
3826 mode = READ_ONCE(sqe->len);
3827 flags = READ_ONCE(sqe->open_flags);
3828 req->open.how = build_open_how(flags, mode);
3829 return __io_openat_prep(req, sqe);
3830}
3831
cebdb986 3832static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
aa1fa28f 3833{
cebdb986 3834 struct open_how __user *how;
cebdb986 3835 size_t len;
0fa03c62
JA
3836 int ret;
3837
14587a46 3838 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4eb8dded 3839 return -EINVAL;
cebdb986
JA
3840 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3841 len = READ_ONCE(sqe->len);
cebdb986
JA
3842 if (len < OPEN_HOW_SIZE_VER0)
3843 return -EINVAL;
3529d8c2 3844
cebdb986
JA
3845 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3846 len);
3847 if (ret)
3848 return ret;
3529d8c2 3849
ec65fea5 3850 return __io_openat_prep(req, sqe);
cebdb986
JA
3851}
3852
45d189c6 3853static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
15b71abe
JA
3854{
3855 struct open_flags op;
15b71abe 3856 struct file *file;
3a81fd02
JA
3857 bool nonblock_set;
3858 bool resolve_nonblock;
15b71abe
JA
3859 int ret;
3860
cebdb986 3861 ret = build_open_flags(&req->open.how, &op);
15b71abe
JA
3862 if (ret)
3863 goto err;
3a81fd02
JA
3864 nonblock_set = op.open_flag & O_NONBLOCK;
3865 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
45d189c6 3866 if (issue_flags & IO_URING_F_NONBLOCK) {
3a81fd02
JA
3867 /*
3868 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
3869	 * it'll always return -EAGAIN
3870 */
3871 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
3872 return -EAGAIN;
3873 op.lookup_flags |= LOOKUP_CACHED;
3874 op.open_flag |= O_NONBLOCK;
3875 }
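 /*
  * With LOOKUP_CACHED the path walk is limited to the dcache and O_NONBLOCK
  * keeps the open from blocking; if that fails, the error path below returns
  * -EAGAIN and the open is retried from the blocking io-wq context.
  */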
15b71abe 3876
4022e7af 3877 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
15b71abe
JA
3878 if (ret < 0)
3879 goto err;
3880
3881 file = do_filp_open(req->open.dfd, req->open.filename, &op);
12dcb58a 3882 if (IS_ERR(file)) {
944d1444 3883 /*
12dcb58a
PB
3884	 * We could hang on to this 'fd' on retrying, but it seems like a
3885 * marginal gain for something that is now known to be a slower
3886 * path. So just put it, and we'll get a new one when we retry.
944d1444 3887 */
3a81fd02 3888 put_unused_fd(ret);
3a81fd02 3889
15b71abe 3890 ret = PTR_ERR(file);
12dcb58a
PB
3891 /* only retry if RESOLVE_CACHED wasn't already set by application */
3892 if (ret == -EAGAIN &&
3893 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
3894 return -EAGAIN;
3895 goto err;
15b71abe 3896 }
12dcb58a
PB
3897
3898 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
3899 file->f_flags &= ~O_NONBLOCK;
3900 fsnotify_open(file);
3901 fd_install(ret, file);
15b71abe
JA
3902err:
3903 putname(req->open.filename);
8fef80bf 3904 req->flags &= ~REQ_F_NEED_CLEANUP;
15b71abe 3905 if (ret < 0)
93d2bcd2 3906 req_set_fail(req);
0bdf3398 3907 __io_req_complete(req, issue_flags, ret, 0);
15b71abe
JA
3908 return 0;
3909}
3910
45d189c6 3911static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
cebdb986 3912{
e45cff58 3913 return io_openat2(req, issue_flags);
cebdb986
JA
3914}
3915
067524e9
JA
3916static int io_remove_buffers_prep(struct io_kiocb *req,
3917 const struct io_uring_sqe *sqe)
3918{
3919 struct io_provide_buf *p = &req->pbuf;
3920 u64 tmp;
3921
3922 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3923 return -EINVAL;
3924
3925 tmp = READ_ONCE(sqe->fd);
3926 if (!tmp || tmp > USHRT_MAX)
3927 return -EINVAL;
3928
3929 memset(p, 0, sizeof(*p));
3930 p->nbufs = tmp;
3931 p->bgid = READ_ONCE(sqe->buf_group);
3932 return 0;
3933}
3934
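/*
 * Provided buffers are kept per buffer group: the head io_buffer lives in
 * ctx->io_buffers keyed by bgid and the rest hang off head->list. Freeing
 * the head removes the whole group from the xarray.
 */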
3935static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3936 int bgid, unsigned nbufs)
3937{
3938 unsigned i = 0;
3939
3940 /* shouldn't happen */
3941 if (!nbufs)
3942 return 0;
3943
3944 /* the head kbuf is the list itself */
3945 while (!list_empty(&buf->list)) {
3946 struct io_buffer *nxt;
3947
3948 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3949 list_del(&nxt->list);
3950 kfree(nxt);
3951 if (++i == nbufs)
3952 return i;
3953 }
3954 i++;
3955 kfree(buf);
9e15c3a0 3956 xa_erase(&ctx->io_buffers, bgid);
067524e9
JA
3957
3958 return i;
3959}
3960
889fca73 3961static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
067524e9
JA
3962{
3963 struct io_provide_buf *p = &req->pbuf;
3964 struct io_ring_ctx *ctx = req->ctx;
3965 struct io_buffer *head;
3966 int ret = 0;
45d189c6 3967 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
067524e9
JA
3968
3969 io_ring_submit_lock(ctx, !force_nonblock);
3970
3971 lockdep_assert_held(&ctx->uring_lock);
3972
3973 ret = -ENOENT;
9e15c3a0 3974 head = xa_load(&ctx->io_buffers, p->bgid);
067524e9
JA
3975 if (head)
3976 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
067524e9 3977 if (ret < 0)
93d2bcd2 3978 req_set_fail(req);
067524e9 3979
9fb8cb49
PB
3980 /* complete before unlock, IOPOLL may need the lock */
3981 __io_req_complete(req, issue_flags, ret, 0);
3982 io_ring_submit_unlock(ctx, !force_nonblock);
067524e9
JA
3983 return 0;
3984}
3985
ddf0322d
JA
3986static int io_provide_buffers_prep(struct io_kiocb *req,
3987 const struct io_uring_sqe *sqe)
3988{
38134ada 3989 unsigned long size, tmp_check;
ddf0322d
JA
3990 struct io_provide_buf *p = &req->pbuf;
3991 u64 tmp;
3992
3993 if (sqe->ioprio || sqe->rw_flags)
3994 return -EINVAL;
3995
3996 tmp = READ_ONCE(sqe->fd);
3997 if (!tmp || tmp > USHRT_MAX)
3998 return -E2BIG;
3999 p->nbufs = tmp;
4000 p->addr = READ_ONCE(sqe->addr);
4001 p->len = READ_ONCE(sqe->len);
4002
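 /*
  * Guard both len * nbufs and addr + size against overflow before the
  * access_ok() range check below.
  */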
38134ada
PB
4003 if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
4004 &size))
4005 return -EOVERFLOW;
4006 if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
4007 return -EOVERFLOW;
4008
d81269fe
PB
4009 size = (unsigned long)p->len * p->nbufs;
4010 if (!access_ok(u64_to_user_ptr(p->addr), size))
ddf0322d
JA
4011 return -EFAULT;
4012
4013 p->bgid = READ_ONCE(sqe->buf_group);
4014 tmp = READ_ONCE(sqe->off);
4015 if (tmp > USHRT_MAX)
4016 return -E2BIG;
4017 p->bid = tmp;
4018 return 0;
4019}
4020
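/*
 * Allocate one io_buffer per provided buffer; they share a single length and
 * describe consecutive ranges starting at pbuf->addr. Allocation stops at the
 * first failure and the number of buffers actually added is returned.
 */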
4021static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4022{
4023 struct io_buffer *buf;
4024 u64 addr = pbuf->addr;
4025 int i, bid = pbuf->bid;
4026
4027 for (i = 0; i < pbuf->nbufs; i++) {
4028 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
4029 if (!buf)
4030 break;
4031
4032 buf->addr = addr;
d1f82808 4033 buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
ddf0322d
JA
4034 buf->bid = bid;
4035 addr += pbuf->len;
4036 bid++;
4037 if (!*head) {
4038 INIT_LIST_HEAD(&buf->list);
4039 *head = buf;
4040 } else {
4041 list_add_tail(&buf->list, &(*head)->list);
4042 }
4043 }
4044
4045 return i ? i : -ENOMEM;
4046}
4047
889fca73 4048static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
ddf0322d
JA
4049{
4050 struct io_provide_buf *p = &req->pbuf;
4051 struct io_ring_ctx *ctx = req->ctx;
4052 struct io_buffer *head, *list;
4053 int ret = 0;
45d189c6 4054 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ddf0322d
JA
4055
4056 io_ring_submit_lock(ctx, !force_nonblock);
4057
4058 lockdep_assert_held(&ctx->uring_lock);
4059
9e15c3a0 4060 list = head = xa_load(&ctx->io_buffers, p->bgid);
ddf0322d
JA
4061
4062 ret = io_add_buffers(p, &head);
9e15c3a0
JA
4063 if (ret >= 0 && !list) {
4064 ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
4065 if (ret < 0)
067524e9 4066 __io_remove_buffers(ctx, head, p->bgid, -1U);
ddf0322d 4067 }
ddf0322d 4068 if (ret < 0)
93d2bcd2 4069 req_set_fail(req);
9fb8cb49
PB
4070 /* complete before unlock, IOPOLL may need the lock */
4071 __io_req_complete(req, issue_flags, ret, 0);
4072 io_ring_submit_unlock(ctx, !force_nonblock);
ddf0322d 4073 return 0;
cebdb986
JA
4074}
4075
3e4827b0
JA
4076static int io_epoll_ctl_prep(struct io_kiocb *req,
4077 const struct io_uring_sqe *sqe)
4078{
4079#if defined(CONFIG_EPOLL)
4080 if (sqe->ioprio || sqe->buf_index)
4081 return -EINVAL;
2d74d042 4082 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3232dd02 4083 return -EINVAL;
3e4827b0
JA
4084
4085 req->epoll.epfd = READ_ONCE(sqe->fd);
4086 req->epoll.op = READ_ONCE(sqe->len);
4087 req->epoll.fd = READ_ONCE(sqe->off);
4088
4089 if (ep_op_has_event(req->epoll.op)) {
4090 struct epoll_event __user *ev;
4091
4092 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4093 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4094 return -EFAULT;
4095 }
4096
4097 return 0;
4098#else
4099 return -EOPNOTSUPP;
4100#endif
4101}
4102
889fca73 4103static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
3e4827b0
JA
4104{
4105#if defined(CONFIG_EPOLL)
4106 struct io_epoll *ie = &req->epoll;
4107 int ret;
45d189c6 4108 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3e4827b0
JA
4109
4110 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4111 if (force_nonblock && ret == -EAGAIN)
4112 return -EAGAIN;
4113
4114 if (ret < 0)
93d2bcd2 4115 req_set_fail(req);
889fca73 4116 __io_req_complete(req, issue_flags, ret, 0);
3e4827b0
JA
4117 return 0;
4118#else
4119 return -EOPNOTSUPP;
4120#endif
4121}
4122
c1ca757b
JA
4123static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4124{
4125#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4126 if (sqe->ioprio || sqe->buf_index || sqe->off)
4127 return -EINVAL;
3232dd02
PB
4128 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4129 return -EINVAL;
c1ca757b
JA
4130
4131 req->madvise.addr = READ_ONCE(sqe->addr);
4132 req->madvise.len = READ_ONCE(sqe->len);
4133 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4134 return 0;
4135#else
4136 return -EOPNOTSUPP;
4137#endif
4138}
4139
45d189c6 4140static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
c1ca757b
JA
4141{
4142#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4143 struct io_madvise *ma = &req->madvise;
4144 int ret;
4145
45d189c6 4146 if (issue_flags & IO_URING_F_NONBLOCK)
c1ca757b
JA
4147 return -EAGAIN;
4148
0726b01e 4149 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
c1ca757b 4150 if (ret < 0)
93d2bcd2 4151 req_set_fail(req);
e1e16097 4152 io_req_complete(req, ret);
c1ca757b
JA
4153 return 0;
4154#else
4155 return -EOPNOTSUPP;
4156#endif
4157}
4158
4840e418
JA
4159static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4160{
4161 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4162 return -EINVAL;
3232dd02
PB
4163 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4164 return -EINVAL;
4840e418
JA
4165
4166 req->fadvise.offset = READ_ONCE(sqe->off);
4167 req->fadvise.len = READ_ONCE(sqe->len);
4168 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4169 return 0;
4170}
4171
45d189c6 4172static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
4840e418
JA
4173{
4174 struct io_fadvise *fa = &req->fadvise;
4175 int ret;
4176
45d189c6 4177 if (issue_flags & IO_URING_F_NONBLOCK) {
3e69426d
JA
4178 switch (fa->advice) {
4179 case POSIX_FADV_NORMAL:
4180 case POSIX_FADV_RANDOM:
4181 case POSIX_FADV_SEQUENTIAL:
4182 break;
4183 default:
4184 return -EAGAIN;
4185 }
4186 }
4840e418
JA
4187
4188 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4189 if (ret < 0)
93d2bcd2 4190 req_set_fail(req);
0bdf3398 4191 __io_req_complete(req, issue_flags, ret, 0);
4840e418
JA
4192 return 0;
4193}
4194
eddc7ef5
JA
4195static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4196{
2d74d042 4197 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3232dd02 4198 return -EINVAL;
eddc7ef5
JA
4199 if (sqe->ioprio || sqe->buf_index)
4200 return -EINVAL;
9c280f90 4201 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 4202 return -EBADF;
eddc7ef5 4203
1d9e1288
BM
4204 req->statx.dfd = READ_ONCE(sqe->fd);
4205 req->statx.mask = READ_ONCE(sqe->len);
e62753e4 4206 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
1d9e1288
BM
4207 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4208 req->statx.flags = READ_ONCE(sqe->statx_flags);
eddc7ef5
JA
4209
4210 return 0;
4211}
4212
45d189c6 4213static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
eddc7ef5 4214{
1d9e1288 4215 struct io_statx *ctx = &req->statx;
eddc7ef5
JA
4216 int ret;
4217
59d70013 4218 if (issue_flags & IO_URING_F_NONBLOCK)
eddc7ef5
JA
4219 return -EAGAIN;
4220
e62753e4
BM
4221 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4222 ctx->buffer);
eddc7ef5 4223
eddc7ef5 4224 if (ret < 0)
93d2bcd2 4225 req_set_fail(req);
e1e16097 4226 io_req_complete(req, ret);
eddc7ef5
JA
4227 return 0;
4228}
4229
b5dba59e
JA
4230static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4231{
14587a46 4232 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3232dd02 4233 return -EINVAL;
b5dba59e
JA
4234 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4235 sqe->rw_flags || sqe->buf_index)
4236 return -EINVAL;
9c280f90 4237 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 4238 return -EBADF;
b5dba59e
JA
4239
4240 req->close.fd = READ_ONCE(sqe->fd);
b5dba59e 4241 return 0;
b5dba59e
JA
4242}
4243
889fca73 4244static int io_close(struct io_kiocb *req, unsigned int issue_flags)
b5dba59e 4245{
9eac1904 4246 struct files_struct *files = current->files;
3af73b28 4247 struct io_close *close = &req->close;
9eac1904 4248 struct fdtable *fdt;
a1fde923
PB
4249 struct file *file = NULL;
4250 int ret = -EBADF;
b5dba59e 4251
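 /*
  * Peek at the fd under file_lock first: closing an io_uring file from
  * within io_uring isn't supported, and a file with a ->flush method gets
  * punted to the blocking path before the fd is detached.
  */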
9eac1904
JA
4252 spin_lock(&files->file_lock);
4253 fdt = files_fdtable(files);
4254 if (close->fd >= fdt->max_fds) {
4255 spin_unlock(&files->file_lock);
4256 goto err;
4257 }
4258 file = fdt->fd[close->fd];
a1fde923 4259 if (!file || file->f_op == &io_uring_fops) {
9eac1904
JA
4260 spin_unlock(&files->file_lock);
4261 file = NULL;
4262 goto err;
3af73b28 4263 }
b5dba59e
JA
4264
4265 /* if the file has a flush method, be safe and punt to async */
45d189c6 4266 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
9eac1904 4267 spin_unlock(&files->file_lock);
0bf0eefd 4268 return -EAGAIN;
a2100672 4269 }
b5dba59e 4270
9eac1904
JA
4271 ret = __close_fd_get_file(close->fd, &file);
4272 spin_unlock(&files->file_lock);
4273 if (ret < 0) {
4274 if (ret == -ENOENT)
4275 ret = -EBADF;
4276 goto err;
4277 }
4278
3af73b28 4279 /* No ->flush() or already async, safely close from here */
9eac1904
JA
4280 ret = filp_close(file, current->files);
4281err:
3af73b28 4282 if (ret < 0)
93d2bcd2 4283 req_set_fail(req);
9eac1904
JA
4284 if (file)
4285 fput(file);
889fca73 4286 __io_req_complete(req, issue_flags, ret, 0);
1a417f4e 4287 return 0;
b5dba59e
JA
4288}
4289
1155c76a 4290static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5d17b4a4
JA
4291{
4292 struct io_ring_ctx *ctx = req->ctx;
5d17b4a4 4293
5d17b4a4
JA
4294 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4295 return -EINVAL;
4296 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4297 return -EINVAL;
4298
8ed8d3c3
JA
4299 req->sync.off = READ_ONCE(sqe->off);
4300 req->sync.len = READ_ONCE(sqe->len);
4301 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
8ed8d3c3
JA
4302 return 0;
4303}
4304
45d189c6 4305static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 4306{
8ed8d3c3
JA
4307 int ret;
4308
ac45abc0 4309 /* sync_file_range always requires a blocking context */
45d189c6 4310 if (issue_flags & IO_URING_F_NONBLOCK)
ac45abc0
PB
4311 return -EAGAIN;
4312
9adbd45d 4313 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
8ed8d3c3
JA
4314 req->sync.flags);
4315 if (ret < 0)
93d2bcd2 4316 req_set_fail(req);
e1e16097 4317 io_req_complete(req, ret);
5d17b4a4
JA
4318 return 0;
4319}
4320
469956e8 4321#if defined(CONFIG_NET)
02d27d89
PB
4322static int io_setup_async_msg(struct io_kiocb *req,
4323 struct io_async_msghdr *kmsg)
4324{
e8c2bc1f
JA
4325 struct io_async_msghdr *async_msg = req->async_data;
4326
4327 if (async_msg)
02d27d89 4328 return -EAGAIN;
e8c2bc1f 4329 if (io_alloc_async_data(req)) {
257e84a5 4330 kfree(kmsg->free_iov);
02d27d89
PB
4331 return -ENOMEM;
4332 }
e8c2bc1f 4333 async_msg = req->async_data;
02d27d89 4334 req->flags |= REQ_F_NEED_CLEANUP;
e8c2bc1f 4335 memcpy(async_msg, kmsg, sizeof(*kmsg));
2a780802 4336 async_msg->msg.msg_name = &async_msg->addr;
257e84a5
PB
4337	/* if we're using fast_iov, set it to the new one */
4338 if (!async_msg->free_iov)
4339 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4340
02d27d89
PB
4341 return -EAGAIN;
4342}
4343
2ae523ed
PB
4344static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4345 struct io_async_msghdr *iomsg)
4346{
2ae523ed 4347 iomsg->msg.msg_name = &iomsg->addr;
257e84a5 4348 iomsg->free_iov = iomsg->fast_iov;
2ae523ed 4349 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
257e84a5 4350 req->sr_msg.msg_flags, &iomsg->free_iov);
2ae523ed
PB
4351}
4352
93642ef8
PB
4353static int io_sendmsg_prep_async(struct io_kiocb *req)
4354{
4355 int ret;
4356
93642ef8
PB
4357 ret = io_sendmsg_copy_hdr(req, req->async_data);
4358 if (!ret)
4359 req->flags |= REQ_F_NEED_CLEANUP;
4360 return ret;
4361}
4362
3529d8c2 4363static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
03b1230c 4364{
e47293fd 4365 struct io_sr_msg *sr = &req->sr_msg;
03b1230c 4366
d2b6f48b
PB
4367 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4368 return -EINVAL;
4369
270a5940 4370 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
fddaface 4371 sr->len = READ_ONCE(sqe->len);
04411806
PB
4372 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4373 if (sr->msg_flags & MSG_DONTWAIT)
4374 req->flags |= REQ_F_NOWAIT;
3529d8c2 4375
d8768362
JA
4376#ifdef CONFIG_COMPAT
4377 if (req->ctx->compat)
4378 sr->msg_flags |= MSG_CMSG_COMPAT;
4379#endif
93642ef8 4380 return 0;
03b1230c
JA
4381}
4382
889fca73 4383static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
aa1fa28f 4384{
6b754c8b 4385 struct io_async_msghdr iomsg, *kmsg;
0fa03c62 4386 struct socket *sock;
7a7cacba 4387 unsigned flags;
0031275d 4388 int min_ret = 0;
0fa03c62
JA
4389 int ret;
4390
dba4a925 4391 sock = sock_from_file(req->file);
7a7cacba 4392 if (unlikely(!sock))
dba4a925 4393 return -ENOTSOCK;
3529d8c2 4394
257e84a5
PB
4395 kmsg = req->async_data;
4396 if (!kmsg) {
7a7cacba
PB
4397 ret = io_sendmsg_copy_hdr(req, &iomsg);
4398 if (ret)
4399 return ret;
4400 kmsg = &iomsg;
0fa03c62 4401 }
0fa03c62 4402
04411806
PB
4403 flags = req->sr_msg.msg_flags;
4404 if (issue_flags & IO_URING_F_NONBLOCK)
7a7cacba 4405 flags |= MSG_DONTWAIT;
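 /*
  * With MSG_WAITALL a short send counts as a failure, so remember how many
  * bytes were asked for and compare against the return value below.
  */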
0031275d
SM
4406 if (flags & MSG_WAITALL)
4407 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4408
7a7cacba 4409 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
45d189c6 4410 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
7a7cacba
PB
4411 return io_setup_async_msg(req, kmsg);
4412 if (ret == -ERESTARTSYS)
4413 ret = -EINTR;
0fa03c62 4414
257e84a5
PB
4415 /* fast path, check for non-NULL to avoid function call */
4416 if (kmsg->free_iov)
4417 kfree(kmsg->free_iov);
99bc4c38 4418 req->flags &= ~REQ_F_NEED_CLEANUP;
0031275d 4419 if (ret < min_ret)
93d2bcd2 4420 req_set_fail(req);
889fca73 4421 __io_req_complete(req, issue_flags, ret, 0);
5d17b4a4 4422 return 0;
03b1230c 4423}
aa1fa28f 4424
889fca73 4425static int io_send(struct io_kiocb *req, unsigned int issue_flags)
fddaface 4426{
7a7cacba
PB
4427 struct io_sr_msg *sr = &req->sr_msg;
4428 struct msghdr msg;
4429 struct iovec iov;
fddaface 4430 struct socket *sock;
7a7cacba 4431 unsigned flags;
0031275d 4432 int min_ret = 0;
fddaface
JA
4433 int ret;
4434
dba4a925 4435 sock = sock_from_file(req->file);
7a7cacba 4436 if (unlikely(!sock))
dba4a925 4437 return -ENOTSOCK;
fddaface 4438
7a7cacba
PB
4439 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4440 if (unlikely(ret))
14db8411 4441 return ret;
fddaface 4442
7a7cacba
PB
4443 msg.msg_name = NULL;
4444 msg.msg_control = NULL;
4445 msg.msg_controllen = 0;
4446 msg.msg_namelen = 0;
fddaface 4447
04411806
PB
4448 flags = req->sr_msg.msg_flags;
4449 if (issue_flags & IO_URING_F_NONBLOCK)
7a7cacba 4450 flags |= MSG_DONTWAIT;
0031275d
SM
4451 if (flags & MSG_WAITALL)
4452 min_ret = iov_iter_count(&msg.msg_iter);
4453
7a7cacba
PB
4454 msg.msg_flags = flags;
4455 ret = sock_sendmsg(sock, &msg);
45d189c6 4456 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
7a7cacba
PB
4457 return -EAGAIN;
4458 if (ret == -ERESTARTSYS)
4459 ret = -EINTR;
fddaface 4460
0031275d 4461 if (ret < min_ret)
93d2bcd2 4462 req_set_fail(req);
889fca73 4463 __io_req_complete(req, issue_flags, ret, 0);
fddaface 4464 return 0;
fddaface
JA
4465}
4466
1400e697
PB
4467static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4468 struct io_async_msghdr *iomsg)
52de1fe1
JA
4469{
4470 struct io_sr_msg *sr = &req->sr_msg;
4471 struct iovec __user *uiov;
4472 size_t iov_len;
4473 int ret;
4474
1400e697
PB
4475 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4476 &iomsg->uaddr, &uiov, &iov_len);
52de1fe1
JA
4477 if (ret)
4478 return ret;
4479
4480 if (req->flags & REQ_F_BUFFER_SELECT) {
4481 if (iov_len > 1)
4482 return -EINVAL;
5476dfed 4483 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
52de1fe1 4484 return -EFAULT;
5476dfed 4485 sr->len = iomsg->fast_iov[0].iov_len;
257e84a5 4486 iomsg->free_iov = NULL;
52de1fe1 4487 } else {
257e84a5 4488 iomsg->free_iov = iomsg->fast_iov;
89cd35c5 4489 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
257e84a5 4490 &iomsg->free_iov, &iomsg->msg.msg_iter,
89cd35c5 4491 false);
52de1fe1
JA
4492 if (ret > 0)
4493 ret = 0;
4494 }
4495
4496 return ret;
4497}
4498
4499#ifdef CONFIG_COMPAT
4500static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
1400e697 4501 struct io_async_msghdr *iomsg)
52de1fe1 4502{
52de1fe1
JA
4503 struct io_sr_msg *sr = &req->sr_msg;
4504 struct compat_iovec __user *uiov;
4505 compat_uptr_t ptr;
4506 compat_size_t len;
4507 int ret;
4508
4af3417a
PB
4509 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
4510 &ptr, &len);
52de1fe1
JA
4511 if (ret)
4512 return ret;
4513
4514 uiov = compat_ptr(ptr);
4515 if (req->flags & REQ_F_BUFFER_SELECT) {
4516 compat_ssize_t clen;
4517
4518 if (len > 1)
4519 return -EINVAL;
4520 if (!access_ok(uiov, sizeof(*uiov)))
4521 return -EFAULT;
4522 if (__get_user(clen, &uiov->iov_len))
4523 return -EFAULT;
4524 if (clen < 0)
4525 return -EINVAL;
2d280bc8 4526 sr->len = clen;
257e84a5 4527 iomsg->free_iov = NULL;
52de1fe1 4528 } else {
257e84a5 4529 iomsg->free_iov = iomsg->fast_iov;
89cd35c5 4530 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
257e84a5 4531 UIO_FASTIOV, &iomsg->free_iov,
89cd35c5 4532 &iomsg->msg.msg_iter, true);
52de1fe1
JA
4533 if (ret < 0)
4534 return ret;
4535 }
4536
4537 return 0;
4538}
4539#endif
4540
1400e697
PB
4541static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4542 struct io_async_msghdr *iomsg)
52de1fe1 4543{
1400e697 4544 iomsg->msg.msg_name = &iomsg->addr;
52de1fe1
JA
4545
4546#ifdef CONFIG_COMPAT
4547 if (req->ctx->compat)
1400e697 4548 return __io_compat_recvmsg_copy_hdr(req, iomsg);
fddaface 4549#endif
52de1fe1 4550
1400e697 4551 return __io_recvmsg_copy_hdr(req, iomsg);
52de1fe1
JA
4552}
4553
bcda7baa 4554static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
7fbb1b54 4555 bool needs_lock)
bcda7baa
JA
4556{
4557 struct io_sr_msg *sr = &req->sr_msg;
4558 struct io_buffer *kbuf;
4559
bcda7baa
JA
4560 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4561 if (IS_ERR(kbuf))
4562 return kbuf;
4563
4564 sr->kbuf = kbuf;
4565 req->flags |= REQ_F_BUFFER_SELECTED;
bcda7baa 4566 return kbuf;
fddaface
JA
4567}
4568
7fbb1b54
PB
4569static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4570{
4571 return io_put_kbuf(req, req->sr_msg.kbuf);
4572}
4573
93642ef8 4574static int io_recvmsg_prep_async(struct io_kiocb *req)
aa1fa28f 4575{
99bc4c38 4576 int ret;
3529d8c2 4577
93642ef8
PB
4578 ret = io_recvmsg_copy_hdr(req, req->async_data);
4579 if (!ret)
4580 req->flags |= REQ_F_NEED_CLEANUP;
4581 return ret;
4582}
4583
4584static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4585{
4586 struct io_sr_msg *sr = &req->sr_msg;
4587
d2b6f48b
PB
4588 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4589 return -EINVAL;
4590
270a5940 4591 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
0b7b21e4 4592 sr->len = READ_ONCE(sqe->len);
bcda7baa 4593 sr->bgid = READ_ONCE(sqe->buf_group);
04411806
PB
4594 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4595 if (sr->msg_flags & MSG_DONTWAIT)
4596 req->flags |= REQ_F_NOWAIT;
06b76d44 4597
d8768362
JA
4598#ifdef CONFIG_COMPAT
4599 if (req->ctx->compat)
4600 sr->msg_flags |= MSG_CMSG_COMPAT;
4601#endif
93642ef8 4602 return 0;
aa1fa28f
JA
4603}
4604
889fca73 4605static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
aa1fa28f 4606{
6b754c8b 4607 struct io_async_msghdr iomsg, *kmsg;
03b1230c 4608 struct socket *sock;
7fbb1b54 4609 struct io_buffer *kbuf;
7a7cacba 4610 unsigned flags;
0031275d 4611 int min_ret = 0;
52de1fe1 4612 int ret, cflags = 0;
45d189c6 4613 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
03b1230c 4614
dba4a925 4615 sock = sock_from_file(req->file);
7a7cacba 4616 if (unlikely(!sock))
dba4a925 4617 return -ENOTSOCK;
3529d8c2 4618
257e84a5
PB
4619 kmsg = req->async_data;
4620 if (!kmsg) {
7a7cacba
PB
4621 ret = io_recvmsg_copy_hdr(req, &iomsg);
4622 if (ret)
681fda8d 4623 return ret;
7a7cacba
PB
4624 kmsg = &iomsg;
4625 }
03b1230c 4626
bc02ef33 4627 if (req->flags & REQ_F_BUFFER_SELECT) {
7fbb1b54 4628 kbuf = io_recv_buffer_select(req, !force_nonblock);
bc02ef33 4629 if (IS_ERR(kbuf))
52de1fe1 4630 return PTR_ERR(kbuf);
7a7cacba 4631 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
5476dfed
PB
4632 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4633 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
7a7cacba
PB
4634 1, req->sr_msg.len);
4635 }
52de1fe1 4636
04411806
PB
4637 flags = req->sr_msg.msg_flags;
4638 if (force_nonblock)
7a7cacba 4639 flags |= MSG_DONTWAIT;
0031275d
SM
4640 if (flags & MSG_WAITALL)
4641 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4642
7a7cacba
PB
4643 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4644 kmsg->uaddr, flags);
0e1b6fe3
PB
4645 if (force_nonblock && ret == -EAGAIN)
4646 return io_setup_async_msg(req, kmsg);
7a7cacba
PB
4647 if (ret == -ERESTARTSYS)
4648 ret = -EINTR;
03b1230c 4649
7fbb1b54
PB
4650 if (req->flags & REQ_F_BUFFER_SELECTED)
4651 cflags = io_put_recv_kbuf(req);
257e84a5
PB
4652 /* fast path, check for non-NULL to avoid function call */
4653 if (kmsg->free_iov)
4654 kfree(kmsg->free_iov);
99bc4c38 4655 req->flags &= ~REQ_F_NEED_CLEANUP;
0031275d 4656 if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
93d2bcd2 4657 req_set_fail(req);
889fca73 4658 __io_req_complete(req, issue_flags, ret, cflags);
03b1230c 4659 return 0;
0fa03c62 4660}
5d17b4a4 4661
889fca73 4662static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
fddaface 4663{
6b754c8b 4664 struct io_buffer *kbuf;
7a7cacba
PB
4665 struct io_sr_msg *sr = &req->sr_msg;
4666 struct msghdr msg;
4667 void __user *buf = sr->buf;
fddaface 4668 struct socket *sock;
7a7cacba
PB
4669 struct iovec iov;
4670 unsigned flags;
0031275d 4671 int min_ret = 0;
bcda7baa 4672 int ret, cflags = 0;
45d189c6 4673 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
fddaface 4674
dba4a925 4675 sock = sock_from_file(req->file);
7a7cacba 4676 if (unlikely(!sock))
dba4a925 4677 return -ENOTSOCK;
fddaface 4678
bc02ef33 4679 if (req->flags & REQ_F_BUFFER_SELECT) {
7fbb1b54 4680 kbuf = io_recv_buffer_select(req, !force_nonblock);
bcda7baa
JA
4681 if (IS_ERR(kbuf))
4682 return PTR_ERR(kbuf);
7a7cacba 4683 buf = u64_to_user_ptr(kbuf->addr);
bc02ef33 4684 }
bcda7baa 4685
7a7cacba 4686 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
14c32eee
PB
4687 if (unlikely(ret))
4688 goto out_free;
fddaface 4689
7a7cacba
PB
4690 msg.msg_name = NULL;
4691 msg.msg_control = NULL;
4692 msg.msg_controllen = 0;
4693 msg.msg_namelen = 0;
4694 msg.msg_iocb = NULL;
4695 msg.msg_flags = 0;
fddaface 4696
04411806
PB
4697 flags = req->sr_msg.msg_flags;
4698 if (force_nonblock)
7a7cacba 4699 flags |= MSG_DONTWAIT;
0031275d
SM
4700 if (flags & MSG_WAITALL)
4701 min_ret = iov_iter_count(&msg.msg_iter);
4702
7a7cacba
PB
4703 ret = sock_recvmsg(sock, &msg, flags);
4704 if (force_nonblock && ret == -EAGAIN)
4705 return -EAGAIN;
4706 if (ret == -ERESTARTSYS)
4707 ret = -EINTR;
14c32eee 4708out_free:
7fbb1b54
PB
4709 if (req->flags & REQ_F_BUFFER_SELECTED)
4710 cflags = io_put_recv_kbuf(req);
0031275d 4711 if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
93d2bcd2 4712 req_set_fail(req);
889fca73 4713 __io_req_complete(req, issue_flags, ret, cflags);
fddaface 4714 return 0;
fddaface
JA
4715}
4716
3529d8c2 4717static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
17f2fe35 4718{
8ed8d3c3
JA
4719 struct io_accept *accept = &req->accept;
4720
14587a46 4721 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
17f2fe35 4722 return -EINVAL;
8042d6ce 4723 if (sqe->ioprio || sqe->len || sqe->buf_index)
17f2fe35
JA
4724 return -EINVAL;
4725
d55e5f5b
JA
4726 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4727 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
8ed8d3c3 4728 accept->flags = READ_ONCE(sqe->accept_flags);
09952e3e 4729 accept->nofile = rlimit(RLIMIT_NOFILE);
8ed8d3c3 4730 return 0;
8ed8d3c3 4731}
17f2fe35 4732
889fca73 4733static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3
JA
4734{
4735 struct io_accept *accept = &req->accept;
45d189c6 4736 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ac45abc0 4737 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
8ed8d3c3
JA
4738 int ret;
4739
e697deed
JX
4740 if (req->file->f_flags & O_NONBLOCK)
4741 req->flags |= REQ_F_NOWAIT;
4742
8ed8d3c3 4743 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
09952e3e
JA
4744 accept->addr_len, accept->flags,
4745 accept->nofile);
8ed8d3c3 4746 if (ret == -EAGAIN && force_nonblock)
17f2fe35 4747 return -EAGAIN;
ac45abc0
PB
4748 if (ret < 0) {
4749 if (ret == -ERESTARTSYS)
4750 ret = -EINTR;
93d2bcd2 4751 req_set_fail(req);
ac45abc0 4752 }
889fca73 4753 __io_req_complete(req, issue_flags, ret, 0);
17f2fe35 4754 return 0;
8ed8d3c3
JA
4755}
4756
93642ef8
PB
4757static int io_connect_prep_async(struct io_kiocb *req)
4758{
4759 struct io_async_connect *io = req->async_data;
4760 struct io_connect *conn = &req->connect;
4761
4762 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
4763}
4764
3529d8c2 4765static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f499a021 4766{
3529d8c2 4767 struct io_connect *conn = &req->connect;
f499a021 4768
14587a46 4769 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3fbb51c1
JA
4770 return -EINVAL;
4771 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4772 return -EINVAL;
4773
3529d8c2
JA
4774 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4775 conn->addr_len = READ_ONCE(sqe->addr2);
93642ef8 4776 return 0;
f499a021
JA
4777}
4778
889fca73 4779static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
f8e85cf2 4780{
e8c2bc1f 4781 struct io_async_connect __io, *io;
f8e85cf2 4782 unsigned file_flags;
3fbb51c1 4783 int ret;
45d189c6 4784 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
f8e85cf2 4785
e8c2bc1f
JA
4786 if (req->async_data) {
4787 io = req->async_data;
f499a021 4788 } else {
3529d8c2
JA
4789 ret = move_addr_to_kernel(req->connect.addr,
4790 req->connect.addr_len,
e8c2bc1f 4791 &__io.address);
f499a021
JA
4792 if (ret)
4793 goto out;
4794 io = &__io;
4795 }
4796
3fbb51c1
JA
4797 file_flags = force_nonblock ? O_NONBLOCK : 0;
4798
e8c2bc1f 4799 ret = __sys_connect_file(req->file, &io->address,
3fbb51c1 4800 req->connect.addr_len, file_flags);
87f80d62 4801 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
e8c2bc1f 4802 if (req->async_data)
b7bb4f7d 4803 return -EAGAIN;
e8c2bc1f 4804 if (io_alloc_async_data(req)) {
f499a021
JA
4805 ret = -ENOMEM;
4806 goto out;
4807 }
e8c2bc1f 4808 memcpy(req->async_data, &__io, sizeof(__io));
f8e85cf2 4809 return -EAGAIN;
f499a021 4810 }
f8e85cf2
JA
4811 if (ret == -ERESTARTSYS)
4812 ret = -EINTR;
f499a021 4813out:
4e88d6e7 4814 if (ret < 0)
93d2bcd2 4815 req_set_fail(req);
889fca73 4816 __io_req_complete(req, issue_flags, ret, 0);
f8e85cf2 4817 return 0;
469956e8
Y
4818}
4819#else /* !CONFIG_NET */
99a10081
JA
4820#define IO_NETOP_FN(op) \
4821static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
4822{ \
4823 return -EOPNOTSUPP; \
4824}
4825
4826#define IO_NETOP_PREP(op) \
4827IO_NETOP_FN(op) \
4828static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
4829{ \
4830 return -EOPNOTSUPP; \
4831} \
4832
4833#define IO_NETOP_PREP_ASYNC(op) \
4834IO_NETOP_PREP(op) \
4835static int io_##op##_prep_async(struct io_kiocb *req) \
4836{ \
4837 return -EOPNOTSUPP; \
4838}
4839
4840IO_NETOP_PREP_ASYNC(sendmsg);
4841IO_NETOP_PREP_ASYNC(recvmsg);
4842IO_NETOP_PREP_ASYNC(connect);
4843IO_NETOP_PREP(accept);
4844IO_NETOP_FN(send);
4845IO_NETOP_FN(recv);
469956e8 4846#endif /* CONFIG_NET */
f8e85cf2 4847
d7718a9d
JA
4848struct io_poll_table {
4849 struct poll_table_struct pt;
4850 struct io_kiocb *req;
68b11e8b 4851 int nr_entries;
d7718a9d
JA
4852 int error;
4853};
ce593a6c 4854
d7718a9d 4855static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
5b0a6acc 4856 __poll_t mask, io_req_tw_func_t func)
d7718a9d 4857{
d7718a9d
JA
4858 /* for instances that support it check for an event match first: */
4859 if (mask && !(mask & poll->events))
4860 return 0;
4861
4862 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4863
4864 list_del_init(&poll->wait.entry);
4865
d7718a9d 4866 req->result = mask;
5b0a6acc 4867 req->io_task_work.func = func;
6d816e08 4868
d7718a9d 4869 /*
e3aabf95
JA
4870 * If this fails, then the task is exiting. When a task exits, the
4871 * work gets canceled, so just cancel this request as well instead
4872	 * of executing it. We can't safely execute it anyway, as we may not
4873	 * have the state it needs.
d7718a9d 4874 */
e09ee510 4875 io_req_task_work_add(req);
d7718a9d
JA
4876 return 1;
4877}
4878
74ce6ce4
JA
4879static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4880 __acquires(&req->ctx->completion_lock)
4881{
4882 struct io_ring_ctx *ctx = req->ctx;
4883
e09ee510
PB
4884 if (unlikely(req->task->flags & PF_EXITING))
4885 WRITE_ONCE(poll->canceled, true);
4886
74ce6ce4
JA
4887 if (!req->result && !READ_ONCE(poll->canceled)) {
4888 struct poll_table_struct pt = { ._key = poll->events };
4889
4890 req->result = vfs_poll(req->file, &pt) & poll->events;
4891 }
4892
4893 spin_lock_irq(&ctx->completion_lock);
4894 if (!req->result && !READ_ONCE(poll->canceled)) {
4895 add_wait_queue(poll->head, &poll->wait);
4896 return true;
4897 }
4898
4899 return false;
4900}
4901
d4e7cd36 4902static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
18bceab1 4903{
e8c2bc1f 4904 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
d4e7cd36 4905 if (req->opcode == IORING_OP_POLL_ADD)
e8c2bc1f 4906 return req->async_data;
d4e7cd36
JA
4907 return req->apoll->double_poll;
4908}
4909
4910static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4911{
4912 if (req->opcode == IORING_OP_POLL_ADD)
4913 return &req->poll;
4914 return &req->apoll->poll;
4915}
4916
4917static void io_poll_remove_double(struct io_kiocb *req)
e07785b0 4918 __must_hold(&req->ctx->completion_lock)
d4e7cd36
JA
4919{
4920 struct io_poll_iocb *poll = io_poll_get_double(req);
18bceab1
JA
4921
4922 lockdep_assert_held(&req->ctx->completion_lock);
4923
4924 if (poll && poll->head) {
4925 struct wait_queue_head *head = poll->head;
4926
4927 spin_lock(&head->lock);
4928 list_del_init(&poll->wait.entry);
4929 if (poll->wait.private)
de9b4cca 4930 req_ref_put(req);
18bceab1
JA
4931 poll->head = NULL;
4932 spin_unlock(&head->lock);
4933 }
4934}
4935
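/*
 * Post a CQE for a poll hit. A multishot poll (EPOLLONESHOT not set) stays
 * armed and tags the CQE with IORING_CQE_F_MORE; the return value is true
 * when this completion finishes the request.
 */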
e27414be 4936static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
e07785b0 4937 __must_hold(&req->ctx->completion_lock)
18bceab1
JA
4938{
4939 struct io_ring_ctx *ctx = req->ctx;
88e41cf9 4940 unsigned flags = IORING_CQE_F_MORE;
e27414be 4941 int error;
18bceab1 4942
e27414be 4943 if (READ_ONCE(req->poll.canceled)) {
45ab03b1 4944 error = -ECANCELED;
88e41cf9 4945 req->poll.events |= EPOLLONESHOT;
e27414be 4946 } else {
5082620f 4947 error = mangle_poll(mask);
e27414be 4948 }
b69de288
JA
4949 if (req->poll.events & EPOLLONESHOT)
4950 flags = 0;
d4d19c19 4951 if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
88e41cf9
JA
4952 req->poll.done = true;
4953 flags = 0;
4954 }
7b289c38
HX
4955 if (flags & IORING_CQE_F_MORE)
4956 ctx->cq_extra++;
18bceab1 4957
18bceab1 4958 io_commit_cqring(ctx);
88e41cf9 4959 return !(flags & IORING_CQE_F_MORE);
18bceab1
JA
4960}
4961
5b0a6acc 4962static void io_poll_task_func(struct io_kiocb *req)
18bceab1
JA
4963{
4964 struct io_ring_ctx *ctx = req->ctx;
dd221f46 4965 struct io_kiocb *nxt;
18bceab1
JA
4966
4967 if (io_poll_rewait(req, &req->poll)) {
4968 spin_unlock_irq(&ctx->completion_lock);
dd221f46 4969 } else {
f40b964a 4970 bool done;
18bceab1 4971
e27414be 4972 done = io_poll_complete(req, req->result);
88e41cf9 4973 if (done) {
a890d01e 4974 io_poll_remove_double(req);
88e41cf9 4975 hash_del(&req->hash_node);
f40b964a 4976 } else {
88e41cf9
JA
4977 req->result = 0;
4978 add_wait_queue(req->poll.head, &req->poll.wait);
4979 }
dd221f46 4980 spin_unlock_irq(&ctx->completion_lock);
dd221f46 4981 io_cqring_ev_posted(ctx);
18bceab1 4982
88e41cf9
JA
4983 if (done) {
4984 nxt = io_put_req_find_next(req);
4985 if (nxt)
5b0a6acc 4986 io_req_task_submit(nxt);
88e41cf9 4987 }
dd221f46 4988 }
18bceab1
JA
4989}
4990
4991static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4992 int sync, void *key)
4993{
4994 struct io_kiocb *req = wait->private;
d4e7cd36 4995 struct io_poll_iocb *poll = io_poll_get_single(req);
18bceab1
JA
4996 __poll_t mask = key_to_poll(key);
4997
4998 /* for instances that support it check for an event match first: */
4999 if (mask && !(mask & poll->events))
5000 return 0;
88e41cf9
JA
5001 if (!(poll->events & EPOLLONESHOT))
5002 return poll->wait.func(&poll->wait, mode, sync, key);
18bceab1 5003
8706e04e
JA
5004 list_del_init(&wait->entry);
5005
9ce85ef2 5006 if (poll->head) {
18bceab1
JA
5007 bool done;
5008
807abcb0
JA
5009 spin_lock(&poll->head->lock);
5010 done = list_empty(&poll->wait.entry);
18bceab1 5011 if (!done)
807abcb0 5012 list_del_init(&poll->wait.entry);
d4e7cd36
JA
5013 /* make sure double remove sees this as being gone */
5014 wait->private = NULL;
807abcb0 5015 spin_unlock(&poll->head->lock);
c8b5e260
JA
5016 if (!done) {
5017 /* use wait func handler, so it matches the rq type */
5018 poll->wait.func(&poll->wait, mode, sync, key);
5019 }
18bceab1 5020 }
de9b4cca 5021 req_ref_put(req);
18bceab1
JA
5022 return 1;
5023}
5024
5025static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5026 wait_queue_func_t wake_func)
5027{
5028 poll->head = NULL;
5029 poll->done = false;
5030 poll->canceled = false;
464dca61
JA
5031#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
5032 /* mask in events that we always want/need */
5033 poll->events = events | IO_POLL_UNMASK;
18bceab1
JA
5034 INIT_LIST_HEAD(&poll->wait.entry);
5035 init_waitqueue_func_entry(&poll->wait, wake_func);
5036}
5037
5038static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
807abcb0
JA
5039 struct wait_queue_head *head,
5040 struct io_poll_iocb **poll_ptr)
18bceab1
JA
5041{
5042 struct io_kiocb *req = pt->req;
5043
5044 /*
68b11e8b
PB
5045 * The file being polled uses multiple waitqueues for poll handling
5046	 * (e.g. one for read, one for write). Set up a separate io_poll_iocb
5047 * if this happens.
18bceab1 5048 */
68b11e8b 5049 if (unlikely(pt->nr_entries)) {
58852d4d
PB
5050 struct io_poll_iocb *poll_one = poll;
5051
18bceab1 5052 /* already have a 2nd entry, fail a third attempt */
807abcb0 5053 if (*poll_ptr) {
18bceab1
JA
5054 pt->error = -EINVAL;
5055 return;
5056 }
ea6a693d
JA
5057 /*
5058 * Can't handle multishot for double wait for now, turn it
5059 * into one-shot mode.
5060 */
7a274727
PB
5061 if (!(poll_one->events & EPOLLONESHOT))
5062 poll_one->events |= EPOLLONESHOT;
1c3b3e65 5063 /* double add on the same waitqueue head, ignore */
7a274727 5064 if (poll_one->head == head)
1c3b3e65 5065 return;
18bceab1
JA
5066 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5067 if (!poll) {
5068 pt->error = -ENOMEM;
5069 return;
5070 }
58852d4d 5071 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
de9b4cca 5072 req_ref_get(req);
18bceab1 5073 poll->wait.private = req;
807abcb0 5074 *poll_ptr = poll;
18bceab1
JA
5075 }
5076
68b11e8b 5077 pt->nr_entries++;
18bceab1 5078 poll->head = head;
a31eb4a2
JX
5079
5080 if (poll->events & EPOLLEXCLUSIVE)
5081 add_wait_queue_exclusive(head, &poll->wait);
5082 else
5083 add_wait_queue(head, &poll->wait);
18bceab1
JA
5084}
5085
5086static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5087 struct poll_table_struct *p)
5088{
5089 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
807abcb0 5090 struct async_poll *apoll = pt->req->apoll;
18bceab1 5091
807abcb0 5092 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
18bceab1
JA
5093}
5094
5b0a6acc 5095static void io_async_task_func(struct io_kiocb *req)
d7718a9d 5096{
d7718a9d
JA
5097 struct async_poll *apoll = req->apoll;
5098 struct io_ring_ctx *ctx = req->ctx;
5099
236daeae 5100 trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
d7718a9d 5101
74ce6ce4 5102 if (io_poll_rewait(req, &apoll->poll)) {
d7718a9d 5103 spin_unlock_irq(&ctx->completion_lock);
74ce6ce4 5104 return;
d7718a9d
JA
5105 }
5106
0ea13b44 5107 hash_del(&req->hash_node);
d4e7cd36 5108 io_poll_remove_double(req);
74ce6ce4
JA
5109 spin_unlock_irq(&ctx->completion_lock);
5110
0be0b0e3 5111 if (!READ_ONCE(apoll->poll.canceled))
5b0a6acc 5112 io_req_task_submit(req);
0be0b0e3 5113 else
2593553a 5114 io_req_complete_failed(req, -ECANCELED);
d7718a9d
JA
5115}
5116
5117static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5118 void *key)
5119{
5120 struct io_kiocb *req = wait->private;
5121 struct io_poll_iocb *poll = &req->apoll->poll;
5122
5123 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5124 key_to_poll(key));
5125
5126 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5127}
5128
5129static void io_poll_req_insert(struct io_kiocb *req)
5130{
5131 struct io_ring_ctx *ctx = req->ctx;
5132 struct hlist_head *list;
5133
5134 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5135 hlist_add_head(&req->hash_node, list);
5136}
5137
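/*
 * Core poll arming: register the wait entries via vfs_poll() and decide,
 * under ->completion_lock, whether the request is already ready (non-zero
 * mask returned) or should stay parked in the cancellation hash until a
 * wakeup arrives. Returns with ->completion_lock held.
 */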
5138static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5139 struct io_poll_iocb *poll,
5140 struct io_poll_table *ipt, __poll_t mask,
5141 wait_queue_func_t wake_func)
5142 __acquires(&ctx->completion_lock)
5143{
5144 struct io_ring_ctx *ctx = req->ctx;
5145 bool cancel = false;
5146
4d52f338 5147 INIT_HLIST_NODE(&req->hash_node);
18bceab1 5148 io_init_poll_iocb(poll, mask, wake_func);
b90cd197 5149 poll->file = req->file;
18bceab1 5150 poll->wait.private = req;
d7718a9d
JA
5151
5152 ipt->pt._key = mask;
5153 ipt->req = req;
68b11e8b
PB
5154 ipt->error = 0;
5155 ipt->nr_entries = 0;
d7718a9d 5156
d7718a9d 5157 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
68b11e8b
PB
5158 if (unlikely(!ipt->nr_entries) && !ipt->error)
5159 ipt->error = -EINVAL;
d7718a9d
JA
5160
5161 spin_lock_irq(&ctx->completion_lock);
a890d01e 5162 if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
46fee9ab 5163 io_poll_remove_double(req);
d7718a9d
JA
5164 if (likely(poll->head)) {
5165 spin_lock(&poll->head->lock);
5166 if (unlikely(list_empty(&poll->wait.entry))) {
5167 if (ipt->error)
5168 cancel = true;
5169 ipt->error = 0;
5170 mask = 0;
5171 }
88e41cf9 5172 if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error)
d7718a9d
JA
5173 list_del_init(&poll->wait.entry);
5174 else if (cancel)
5175 WRITE_ONCE(poll->canceled, true);
5176 else if (!poll->done) /* actually waiting for an event */
5177 io_poll_req_insert(req);
5178 spin_unlock(&poll->head->lock);
5179 }
5180
5181 return mask;
5182}
5183
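/*
 * Internal async poll ("apoll"): instead of punting a would-block request
 * to io-wq, arm a poll handler on the file and re-issue the request once
 * it signals readiness. The IO_APOLL_* values tell the caller whether the
 * handler was armed (OK), the file is already ready (READY), or arming
 * isn't possible and io-wq must be used instead (ABORTED).
 */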
59b735ae
OL
5184enum {
5185 IO_APOLL_OK,
5186 IO_APOLL_ABORTED,
5187 IO_APOLL_READY
5188};
5189
5190static int io_arm_poll_handler(struct io_kiocb *req)
d7718a9d
JA
5191{
5192 const struct io_op_def *def = &io_op_defs[req->opcode];
5193 struct io_ring_ctx *ctx = req->ctx;
5194 struct async_poll *apoll;
5195 struct io_poll_table ipt;
b2d9c3da 5196 __poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI;
9dab14b8 5197 int rw;
d7718a9d
JA
5198
5199 if (!req->file || !file_can_poll(req->file))
59b735ae 5200 return IO_APOLL_ABORTED;
24c74678 5201 if (req->flags & REQ_F_POLLED)
59b735ae 5202 return IO_APOLL_ABORTED;
b2d9c3da
PB
5203 if (!def->pollin && !def->pollout)
5204 return IO_APOLL_ABORTED;
5205
5206 if (def->pollin) {
9dab14b8 5207 rw = READ;
b2d9c3da
PB
5208 mask |= POLLIN | POLLRDNORM;
5209
5210 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5211 if ((req->opcode == IORING_OP_RECVMSG) &&
5212 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5213 mask &= ~POLLIN;
5214 } else {
9dab14b8 5215 rw = WRITE;
b2d9c3da
PB
5216 mask |= POLLOUT | POLLWRNORM;
5217 }
5218
9dab14b8 5219	/* if we can't attempt it nonblocking, there's no point in arming a poll handler */
b191e2df 5220 if (!io_file_supports_nowait(req, rw))
59b735ae 5221 return IO_APOLL_ABORTED;
d7718a9d
JA
5222
5223 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5224 if (unlikely(!apoll))
59b735ae 5225 return IO_APOLL_ABORTED;
807abcb0 5226 apoll->double_poll = NULL;
d7718a9d 5227 req->apoll = apoll;
b2d9c3da 5228 req->flags |= REQ_F_POLLED;
d7718a9d
JA
5229 ipt.pt._qproc = io_async_queue_proc;
5230
5231 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5232 io_async_wake);
a36da65c 5233 if (ret || ipt.error) {
d7718a9d 5234 spin_unlock_irq(&ctx->completion_lock);
59b735ae
OL
5235 if (ret)
5236 return IO_APOLL_READY;
5237 return IO_APOLL_ABORTED;
d7718a9d
JA
5238 }
5239 spin_unlock_irq(&ctx->completion_lock);
236daeae
OL
5240 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
5241 mask, apoll->poll.events);
59b735ae 5242 return IO_APOLL_OK;
d7718a9d
JA
5243}
5244
5245static bool __io_poll_remove_one(struct io_kiocb *req,
b2e720ac 5246 struct io_poll_iocb *poll, bool do_cancel)
e07785b0 5247 __must_hold(&req->ctx->completion_lock)
221c5eb2 5248{
b41e9852 5249 bool do_complete = false;
221c5eb2 5250
5082620f
JA
5251 if (!poll->head)
5252 return false;
221c5eb2 5253 spin_lock(&poll->head->lock);
b2e720ac
JA
5254 if (do_cancel)
5255 WRITE_ONCE(poll->canceled, true);
392edb45
JA
5256 if (!list_empty(&poll->wait.entry)) {
5257 list_del_init(&poll->wait.entry);
b41e9852 5258 do_complete = true;
221c5eb2
JA
5259 }
5260 spin_unlock(&poll->head->lock);
3bfa5bcb 5261 hash_del(&req->hash_node);
d7718a9d
JA
5262 return do_complete;
5263}
5264
b2c3f7e1 5265static bool io_poll_remove_waitqs(struct io_kiocb *req)
e07785b0 5266 __must_hold(&req->ctx->completion_lock)
d7718a9d
JA
5267{
5268 bool do_complete;
5269
d4e7cd36 5270 io_poll_remove_double(req);
e31001a3 5271 do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
d4e7cd36 5272
e31001a3 5273 if (req->opcode != IORING_OP_POLL_ADD && do_complete) {
d7718a9d 5274 /* non-poll requests have submit ref still */
e31001a3 5275 req_ref_put(req);
b1f573bd 5276 }
b2c3f7e1
JA
5277 return do_complete;
5278}
5279
5280static bool io_poll_remove_one(struct io_kiocb *req)
e07785b0 5281 __must_hold(&req->ctx->completion_lock)
b2c3f7e1
JA
5282{
5283 bool do_complete;
b1f573bd 5284
b2c3f7e1 5285 do_complete = io_poll_remove_waitqs(req);
b41e9852 5286 if (do_complete) {
d4d19c19 5287 io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
b41e9852 5288 io_commit_cqring(req->ctx);
93d2bcd2 5289 req_set_fail(req);
216578e5 5290 io_put_req_deferred(req, 1);
b41e9852
JA
5291 }
5292
5293 return do_complete;
221c5eb2
JA
5294}
5295
76e1b642
JA
5296/*
5297 * Returns true if we found and killed one or more poll requests
5298 */
6b81928d 5299static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
3dd0c97a 5300 bool cancel_all)
221c5eb2 5301{
78076bb6 5302 struct hlist_node *tmp;
221c5eb2 5303 struct io_kiocb *req;
8e2e1faf 5304 int posted = 0, i;
221c5eb2
JA
5305
5306 spin_lock_irq(&ctx->completion_lock);
78076bb6
JA
5307 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5308 struct hlist_head *list;
5309
5310 list = &ctx->cancel_hash[i];
f3606e3a 5311 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
3dd0c97a 5312 if (io_match_task(req, tsk, cancel_all))
f3606e3a
JA
5313 posted += io_poll_remove_one(req);
5314 }
221c5eb2
JA
5315 }
5316 spin_unlock_irq(&ctx->completion_lock);
b41e9852 5317
8e2e1faf
JA
5318 if (posted)
5319 io_cqring_ev_posted(ctx);
76e1b642
JA
5320
5321 return posted != 0;
221c5eb2
JA
5322}
5323
9ba5fac8
PB
5324static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
5325 bool poll_only)
e07785b0 5326 __must_hold(&ctx->completion_lock)
47f46768 5327{
78076bb6 5328 struct hlist_head *list;
47f46768
JA
5329 struct io_kiocb *req;
5330
78076bb6
JA
5331 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5332 hlist_for_each_entry(req, list, hash_node) {
b41e9852
JA
5333 if (sqe_addr != req->user_data)
5334 continue;
9ba5fac8
PB
5335 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
5336 continue;
b2cb805f 5337 return req;
47f46768 5338 }
b2cb805f
JA
5339 return NULL;
5340}
5341
9ba5fac8
PB
5342static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
5343 bool poll_only)
e07785b0 5344 __must_hold(&ctx->completion_lock)
b2cb805f
JA
5345{
5346 struct io_kiocb *req;
5347
9ba5fac8 5348 req = io_poll_find(ctx, sqe_addr, poll_only);
b2cb805f
JA
5349 if (!req)
5350 return -ENOENT;
5351 if (io_poll_remove_one(req))
5352 return 0;
5353
5354 return -EALREADY;
47f46768
JA
5355}
5356
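/*
 * Translate the user-supplied poll mask from the SQE: poll32_events is
 * halfword-swapped on big-endian kernels, and without IORING_POLL_ADD_MULTI
 * the poll is forced to one-shot. For reference, a multishot poll add from
 * userspace would look roughly like:
 *
 *	sqe->opcode        = IORING_OP_POLL_ADD;
 *	sqe->poll32_events = POLLIN;
 *	sqe->len           = IORING_POLL_ADD_MULTI;
 */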
9096af3e
PB
5357static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
5358 unsigned int flags)
5359{
5360 u32 events;
47f46768 5361
9096af3e
PB
5362 events = READ_ONCE(sqe->poll32_events);
5363#ifdef __BIG_ENDIAN
5364 events = swahw32(events);
5365#endif
5366 if (!(flags & IORING_POLL_ADD_MULTI))
5367 events |= EPOLLONESHOT;
5368 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
47f46768
JA
5369}
5370
c5de0036 5371static int io_poll_update_prep(struct io_kiocb *req,
3529d8c2 5372 const struct io_uring_sqe *sqe)
0969e783 5373{
c5de0036
PB
5374 struct io_poll_update *upd = &req->poll_update;
5375 u32 flags;
5376
0969e783
JA
5377 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5378 return -EINVAL;
c5de0036
PB
5379 if (sqe->ioprio || sqe->buf_index)
5380 return -EINVAL;
5381 flags = READ_ONCE(sqe->len);
5382 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
5383 IORING_POLL_ADD_MULTI))
5384 return -EINVAL;
5385 /* meaningless without update */
5386 if (flags == IORING_POLL_ADD_MULTI)
0969e783
JA
5387 return -EINVAL;
5388
c5de0036
PB
5389 upd->old_user_data = READ_ONCE(sqe->addr);
5390 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
5391 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
221c5eb2 5392
c5de0036
PB
5393 upd->new_user_data = READ_ONCE(sqe->off);
5394 if (!upd->update_user_data && upd->new_user_data)
5395 return -EINVAL;
5396 if (upd->update_events)
5397 upd->events = io_poll_parse_events(sqe, flags);
5398 else if (sqe->poll32_events)
5399 return -EINVAL;
221c5eb2 5400
221c5eb2
JA
5401 return 0;
5402}
5403
221c5eb2
JA
5404static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5405 void *key)
5406{
c2f2eb7d
JA
5407 struct io_kiocb *req = wait->private;
5408 struct io_poll_iocb *poll = &req->poll;
221c5eb2 5409
d7718a9d 5410 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
221c5eb2
JA
5411}
5412
221c5eb2
JA
5413static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5414 struct poll_table_struct *p)
5415{
5416 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5417
e8c2bc1f 5418 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
eac406c6
JA
5419}
5420
3529d8c2 5421static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
5422{
5423 struct io_poll_iocb *poll = &req->poll;
c5de0036 5424 u32 flags;
221c5eb2
JA
5425
5426 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5427 return -EINVAL;
c5de0036 5428 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
88e41cf9
JA
5429 return -EINVAL;
5430 flags = READ_ONCE(sqe->len);
c5de0036 5431 if (flags & ~IORING_POLL_ADD_MULTI)
221c5eb2
JA
5432 return -EINVAL;
5433
c5de0036 5434 poll->events = io_poll_parse_events(sqe, flags);
0969e783
JA
5435 return 0;
5436}
5437
61e98203 5438static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
0969e783
JA
5439{
5440 struct io_poll_iocb *poll = &req->poll;
5441 struct io_ring_ctx *ctx = req->ctx;
5442 struct io_poll_table ipt;
0969e783 5443 __poll_t mask;
0969e783 5444
d7718a9d 5445 ipt.pt._qproc = io_poll_queue_proc;
36703247 5446
d7718a9d
JA
5447 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5448 io_poll_wake);
221c5eb2 5449
8c838788 5450 if (mask) { /* no async, we'd stolen it */
221c5eb2 5451 ipt.error = 0;
e27414be 5452 io_poll_complete(req, mask);
221c5eb2 5453 }
221c5eb2
JA
5454 spin_unlock_irq(&ctx->completion_lock);
5455
8c838788
JA
5456 if (mask) {
5457 io_cqring_ev_posted(ctx);
88e41cf9
JA
5458 if (poll->events & EPOLLONESHOT)
5459 io_put_req(req);
221c5eb2 5460 }
8c838788 5461 return ipt.error;
221c5eb2
JA
5462}
5463
c5de0036 5464static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
b69de288
JA
5465{
5466 struct io_ring_ctx *ctx = req->ctx;
5467 struct io_kiocb *preq;
cb3b200e 5468 bool completing;
b69de288
JA
5469 int ret;
5470
5471 spin_lock_irq(&ctx->completion_lock);
9ba5fac8 5472 preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
b69de288
JA
5473 if (!preq) {
5474 ret = -ENOENT;
5475 goto err;
b69de288 5476 }
cb3b200e 5477
c5de0036
PB
5478 if (!req->poll_update.update_events && !req->poll_update.update_user_data) {
5479 completing = true;
5480 ret = io_poll_remove_one(preq) ? 0 : -EALREADY;
5481 goto err;
5482 }
5483
cb3b200e
JA
5484 /*
5485 * Don't allow racy completion with singleshot, as we cannot safely
5486 * update those. For multishot, if we're racing with completion, just
5487 * let completion re-add it.
5488 */
5489 completing = !__io_poll_remove_one(preq, &preq->poll, false);
5490 if (completing && (preq->poll.events & EPOLLONESHOT)) {
5491 ret = -EALREADY;
5492 goto err;
b69de288
JA
5493 }
5494 /* we now have a detached poll request. reissue. */
5495 ret = 0;
5496err:
b69de288 5497 if (ret < 0) {
cb3b200e 5498 spin_unlock_irq(&ctx->completion_lock);
93d2bcd2 5499 req_set_fail(req);
b69de288
JA
5500 io_req_complete(req, ret);
5501 return 0;
5502 }
 5503	/* only replace the low event mask bits, keep the behavior flags */
9d805892 5504 if (req->poll_update.update_events) {
b69de288 5505 preq->poll.events &= ~0xffff;
9d805892 5506 preq->poll.events |= req->poll_update.events & 0xffff;
b69de288
JA
5507 preq->poll.events |= IO_POLL_UNMASK;
5508 }
9d805892
PB
5509 if (req->poll_update.update_user_data)
5510 preq->user_data = req->poll_update.new_user_data;
cb3b200e
JA
5511 spin_unlock_irq(&ctx->completion_lock);
5512
b69de288
JA
5513 /* complete update request, we're done with it */
5514 io_req_complete(req, ret);
5515
cb3b200e 5516 if (!completing) {
c5de0036 5517 ret = io_poll_add(preq, issue_flags);
cb3b200e 5518 if (ret < 0) {
93d2bcd2 5519 req_set_fail(preq);
cb3b200e
JA
5520 io_req_complete(preq, ret);
5521 }
b69de288
JA
5522 }
5523 return 0;
5524}
5525
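/*
 * hrtimer callback for a normal timeout: take the request off the timeout
 * list, bump cq_timeouts, and post a -ETIME completion for it.
 */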
5262f567
JA
5526static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5527{
ad8a48ac
JA
5528 struct io_timeout_data *data = container_of(timer,
5529 struct io_timeout_data, timer);
5530 struct io_kiocb *req = data->req;
5531 struct io_ring_ctx *ctx = req->ctx;
5262f567
JA
5532 unsigned long flags;
5533
5262f567 5534 spin_lock_irqsave(&ctx->completion_lock, flags);
a71976f3 5535 list_del_init(&req->timeout.list);
01cec8c1
PB
5536 atomic_set(&req->ctx->cq_timeouts,
5537 atomic_read(&req->ctx->cq_timeouts) + 1);
5538
d4d19c19 5539 io_cqring_fill_event(ctx, req->user_data, -ETIME, 0);
5262f567
JA
5540 io_commit_cqring(ctx);
5541 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5542
5543 io_cqring_ev_posted(ctx);
93d2bcd2 5544 req_set_fail(req);
5262f567
JA
5545 io_put_req(req);
5546 return HRTIMER_NORESTART;
5547}
5548
fbd15848
PB
5549static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5550 __u64 user_data)
e07785b0 5551 __must_hold(&ctx->completion_lock)
f254ac04 5552{
fbd15848 5553 struct io_timeout_data *io;
47f46768 5554 struct io_kiocb *req;
fd9c7bc5 5555 bool found = false;
f254ac04 5556
135fcde8 5557 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
fd9c7bc5
PB
5558 found = user_data == req->user_data;
5559 if (found)
47f46768 5560 break;
47f46768 5561 }
fd9c7bc5
PB
5562 if (!found)
5563 return ERR_PTR(-ENOENT);
fbd15848
PB
5564
5565 io = req->async_data;
fd9c7bc5 5566 if (hrtimer_try_to_cancel(&io->timer) == -1)
fbd15848 5567 return ERR_PTR(-EALREADY);
a71976f3 5568 list_del_init(&req->timeout.list);
fbd15848
PB
5569 return req;
5570}
47f46768 5571
fbd15848 5572static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
e07785b0 5573 __must_hold(&ctx->completion_lock)
fbd15848
PB
5574{
5575 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5576
5577 if (IS_ERR(req))
5578 return PTR_ERR(req);
f254ac04 5579
93d2bcd2 5580 req_set_fail(req);
d4d19c19 5581 io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
216578e5 5582 io_put_req_deferred(req, 1);
f254ac04
JA
5583 return 0;
5584}
5585
9c8e11b3
PB
5586static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5587 struct timespec64 *ts, enum hrtimer_mode mode)
e07785b0 5588 __must_hold(&ctx->completion_lock)
47f46768 5589{
9c8e11b3
PB
5590 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5591 struct io_timeout_data *data;
47f46768 5592
9c8e11b3
PB
5593 if (IS_ERR(req))
5594 return PTR_ERR(req);
47f46768 5595
9c8e11b3
PB
5596 req->timeout.off = 0; /* noseq */
5597 data = req->async_data;
5598 list_add_tail(&req->timeout.list, &ctx->timeout_list);
5599 hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5600 data->timer.function = io_timeout_fn;
5601 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5602 return 0;
47f46768
JA
5603}
5604
3529d8c2
JA
5605static int io_timeout_remove_prep(struct io_kiocb *req,
5606 const struct io_uring_sqe *sqe)
b29472ee 5607{
9c8e11b3
PB
5608 struct io_timeout_rem *tr = &req->timeout_rem;
5609
b29472ee
JA
5610 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5611 return -EINVAL;
61710e43
DA
5612 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5613 return -EINVAL;
9c8e11b3 5614 if (sqe->ioprio || sqe->buf_index || sqe->len)
b29472ee
JA
5615 return -EINVAL;
5616
9c8e11b3
PB
5617 tr->addr = READ_ONCE(sqe->addr);
5618 tr->flags = READ_ONCE(sqe->timeout_flags);
5619 if (tr->flags & IORING_TIMEOUT_UPDATE) {
5620 if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5621 return -EINVAL;
5622 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5623 return -EFAULT;
5624 } else if (tr->flags) {
5625 /* timeout removal doesn't support flags */
b29472ee 5626 return -EINVAL;
9c8e11b3 5627 }
b29472ee 5628
b29472ee
JA
5629 return 0;
5630}
5631
8662daec
PB
5632static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5633{
5634 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5635 : HRTIMER_MODE_REL;
5636}
5637
11365043
JA
5638/*
5639 * Remove or update an existing timeout command
5640 */
61e98203 5641static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
11365043 5642{
9c8e11b3 5643 struct io_timeout_rem *tr = &req->timeout_rem;
11365043 5644 struct io_ring_ctx *ctx = req->ctx;
47f46768 5645 int ret;
11365043 5646
11365043 5647 spin_lock_irq(&ctx->completion_lock);
8662daec 5648 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
9c8e11b3 5649 ret = io_timeout_cancel(ctx, tr->addr);
8662daec
PB
5650 else
5651 ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5652 io_translate_timeout_mode(tr->flags));
11365043 5653
d4d19c19 5654 io_cqring_fill_event(ctx, req->user_data, ret, 0);
11365043
JA
5655 io_commit_cqring(ctx);
5656 spin_unlock_irq(&ctx->completion_lock);
5262f567 5657 io_cqring_ev_posted(ctx);
4e88d6e7 5658 if (ret < 0)
93d2bcd2 5659 req_set_fail(req);
ec9c02ad 5660 io_put_req(req);
11365043 5661 return 0;
5262f567
JA
5662}
5663
3529d8c2 5664static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2d28390a 5665 bool is_timeout_link)
5262f567 5666{
ad8a48ac 5667 struct io_timeout_data *data;
a41525ab 5668 unsigned flags;
56080b02 5669 u32 off = READ_ONCE(sqe->off);
5262f567 5670
ad8a48ac 5671 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5262f567 5672 return -EINVAL;
ad8a48ac 5673 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
a41525ab 5674 return -EINVAL;
56080b02 5675 if (off && is_timeout_link)
2d28390a 5676 return -EINVAL;
a41525ab
JA
5677 flags = READ_ONCE(sqe->timeout_flags);
5678 if (flags & ~IORING_TIMEOUT_ABS)
5262f567 5679 return -EINVAL;
bdf20073 5680
bfe68a22 5681 req->timeout.off = off;
f18ee4cf
PB
5682 if (unlikely(off && !req->ctx->off_timeout_used))
5683 req->ctx->off_timeout_used = true;
26a61679 5684
e8c2bc1f 5685 if (!req->async_data && io_alloc_async_data(req))
26a61679
JA
5686 return -ENOMEM;
5687
e8c2bc1f 5688 data = req->async_data;
ad8a48ac 5689 data->req = req;
ad8a48ac
JA
5690
5691 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
5692 return -EFAULT;
5693
8662daec 5694 data->mode = io_translate_timeout_mode(flags);
ad8a48ac 5695 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
2482b58f
PB
5696 if (is_timeout_link)
5697 io_req_track_inflight(req);
ad8a48ac
JA
5698 return 0;
5699}
5700
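/*
 * Queue a timeout request. sqe->off (read in io_timeout_prep) is the number
 * of CQEs that must be posted before the timeout fires; zero means a pure
 * timer. Requests are kept sorted by target CQ sequence so that flushing
 * only ever has to look at the front of the list. For reference, a plain
 * relative timeout from userspace would be set up roughly as:
 *
 *	sqe->opcode = IORING_OP_TIMEOUT;
 *	sqe->addr   = (u64)(uintptr_t)&ts;	(a struct __kernel_timespec)
 *	sqe->len    = 1;
 *	sqe->off    = 0;
 */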
61e98203 5701static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
ad8a48ac 5702{
ad8a48ac 5703 struct io_ring_ctx *ctx = req->ctx;
e8c2bc1f 5704 struct io_timeout_data *data = req->async_data;
ad8a48ac 5705 struct list_head *entry;
bfe68a22 5706 u32 tail, off = req->timeout.off;
ad8a48ac 5707
733f5c95 5708 spin_lock_irq(&ctx->completion_lock);
93bd25bb 5709
5262f567
JA
5710 /*
5711 * sqe->off holds how many events that need to occur for this
93bd25bb
JA
5712 * timeout event to be satisfied. If it isn't set, then this is
5713 * a pure timeout request, sequence isn't used.
5262f567 5714 */
8eb7e2d0 5715 if (io_is_timeout_noseq(req)) {
93bd25bb
JA
5716 entry = ctx->timeout_list.prev;
5717 goto add;
5718 }
5262f567 5719
bfe68a22
PB
5720 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5721 req->timeout.target_seq = tail + off;
5262f567 5722
f010505b
MDG
5723 /* Update the last seq here in case io_flush_timeouts() hasn't.
5724 * This is safe because ->completion_lock is held, and submissions
5725 * and completions are never mixed in the same ->completion_lock section.
5726 */
5727 ctx->cq_last_tm_flush = tail;
5728
5262f567
JA
5729 /*
5730 * Insertion sort, ensuring the first entry in the list is always
5731 * the one we need first.
5732 */
5262f567 5733 list_for_each_prev(entry, &ctx->timeout_list) {
135fcde8
PB
5734 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5735 timeout.list);
5262f567 5736
8eb7e2d0 5737 if (io_is_timeout_noseq(nxt))
93bd25bb 5738 continue;
bfe68a22
PB
5739 /* nxt.seq is behind @tail, otherwise would've been completed */
5740 if (off >= nxt->timeout.target_seq - tail)
5262f567
JA
5741 break;
5742 }
93bd25bb 5743add:
135fcde8 5744 list_add(&req->timeout.list, entry);
ad8a48ac
JA
5745 data->timer.function = io_timeout_fn;
5746 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5262f567 5747 spin_unlock_irq(&ctx->completion_lock);
5262f567
JA
5748 return 0;
5749}
5262f567 5750
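/*
 * Async cancellation (IORING_OP_ASYNC_CANCEL) matches requests by user_data.
 * io_async_cancel_one() asks the task's io-wq to cancel; the callers below
 * fall back to the timeout list and then the poll hash if the target isn't
 * found there.
 */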
f458dd84
PB
5751struct io_cancel_data {
5752 struct io_ring_ctx *ctx;
5753 u64 user_data;
5754};
5755
62755e35
JA
5756static bool io_cancel_cb(struct io_wq_work *work, void *data)
5757{
5758 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
f458dd84 5759 struct io_cancel_data *cd = data;
62755e35 5760
f458dd84 5761 return req->ctx == cd->ctx && req->user_data == cd->user_data;
62755e35
JA
5762}
5763
f458dd84
PB
5764static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
5765 struct io_ring_ctx *ctx)
62755e35 5766{
f458dd84 5767 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
62755e35 5768 enum io_wq_cancel cancel_ret;
62755e35
JA
5769 int ret = 0;
5770
f458dd84 5771 if (!tctx || !tctx->io_wq)
5aa75ed5
JA
5772 return -ENOENT;
5773
f458dd84 5774 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
62755e35
JA
5775 switch (cancel_ret) {
5776 case IO_WQ_CANCEL_OK:
5777 ret = 0;
5778 break;
5779 case IO_WQ_CANCEL_RUNNING:
5780 ret = -EALREADY;
5781 break;
5782 case IO_WQ_CANCEL_NOTFOUND:
5783 ret = -ENOENT;
5784 break;
5785 }
5786
e977d6d3
JA
5787 return ret;
5788}
5789
47f46768
JA
5790static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5791 struct io_kiocb *req, __u64 sqe_addr,
014db007 5792 int success_ret)
47f46768
JA
5793{
5794 unsigned long flags;
5795 int ret;
5796
f458dd84 5797 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
47f46768 5798 spin_lock_irqsave(&ctx->completion_lock, flags);
df9727af
PB
5799 if (ret != -ENOENT)
5800 goto done;
47f46768
JA
5801 ret = io_timeout_cancel(ctx, sqe_addr);
5802 if (ret != -ENOENT)
5803 goto done;
9ba5fac8 5804 ret = io_poll_cancel(ctx, sqe_addr, false);
47f46768 5805done:
b0dd8a41
JA
5806 if (!ret)
5807 ret = success_ret;
d4d19c19 5808 io_cqring_fill_event(ctx, req->user_data, ret, 0);
47f46768
JA
5809 io_commit_cqring(ctx);
5810 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5811 io_cqring_ev_posted(ctx);
5812
4e88d6e7 5813 if (ret < 0)
93d2bcd2 5814 req_set_fail(req);
47f46768
JA
5815}
5816
3529d8c2
JA
5817static int io_async_cancel_prep(struct io_kiocb *req,
5818 const struct io_uring_sqe *sqe)
e977d6d3 5819{
fbf23849 5820 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
e977d6d3 5821 return -EINVAL;
61710e43
DA
5822 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5823 return -EINVAL;
5824 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
e977d6d3
JA
5825 return -EINVAL;
5826
fbf23849
JA
5827 req->cancel.addr = READ_ONCE(sqe->addr);
5828 return 0;
5829}
5830
61e98203 5831static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
fbf23849
JA
5832{
5833 struct io_ring_ctx *ctx = req->ctx;
58f99373
PB
5834 u64 sqe_addr = req->cancel.addr;
5835 struct io_tctx_node *node;
5836 int ret;
5837
5838 /* tasks should wait for their io-wq threads, so safe w/o sync */
5839 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
5840 spin_lock_irq(&ctx->completion_lock);
5841 if (ret != -ENOENT)
5842 goto done;
5843 ret = io_timeout_cancel(ctx, sqe_addr);
5844 if (ret != -ENOENT)
5845 goto done;
9ba5fac8 5846 ret = io_poll_cancel(ctx, sqe_addr, false);
58f99373
PB
5847 if (ret != -ENOENT)
5848 goto done;
5849 spin_unlock_irq(&ctx->completion_lock);
5850
5851 /* slow path, try all io-wq's */
5852 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5853 ret = -ENOENT;
5854 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
5855 struct io_uring_task *tctx = node->task->io_uring;
fbf23849 5856
58f99373
PB
5857 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
5858 if (ret != -ENOENT)
5859 break;
5860 }
5861 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5862
5863 spin_lock_irq(&ctx->completion_lock);
5864done:
d4d19c19 5865 io_cqring_fill_event(ctx, req->user_data, ret, 0);
58f99373
PB
5866 io_commit_cqring(ctx);
5867 spin_unlock_irq(&ctx->completion_lock);
5868 io_cqring_ev_posted(ctx);
5869
5870 if (ret < 0)
93d2bcd2 5871 req_set_fail(req);
58f99373 5872 io_put_req(req);
5262f567
JA
5873 return 0;
5874}
5875
269bbe5f 5876static int io_rsrc_update_prep(struct io_kiocb *req,
05f3fb3c
JA
5877 const struct io_uring_sqe *sqe)
5878{
61710e43
DA
5879 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5880 return -EINVAL;
5881 if (sqe->ioprio || sqe->rw_flags)
05f3fb3c
JA
5882 return -EINVAL;
5883
269bbe5f
BM
5884 req->rsrc_update.offset = READ_ONCE(sqe->off);
5885 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
5886 if (!req->rsrc_update.nr_args)
05f3fb3c 5887 return -EINVAL;
269bbe5f 5888 req->rsrc_update.arg = READ_ONCE(sqe->addr);
05f3fb3c
JA
5889 return 0;
5890}
5891
889fca73 5892static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
fbf23849
JA
5893{
5894 struct io_ring_ctx *ctx = req->ctx;
c3bdad02 5895 struct io_uring_rsrc_update2 up;
05f3fb3c 5896 int ret;
fbf23849 5897
45d189c6 5898 if (issue_flags & IO_URING_F_NONBLOCK)
05f3fb3c 5899 return -EAGAIN;
05f3fb3c 5900
269bbe5f
BM
5901 up.offset = req->rsrc_update.offset;
5902 up.data = req->rsrc_update.arg;
c3bdad02
PB
5903 up.nr = 0;
5904 up.tags = 0;
615cee49 5905 up.resv = 0;
05f3fb3c
JA
5906
5907 mutex_lock(&ctx->uring_lock);
fdecb662 5908 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
98f0b3b4 5909 &up, req->rsrc_update.nr_args);
05f3fb3c
JA
5910 mutex_unlock(&ctx->uring_lock);
5911
5912 if (ret < 0)
93d2bcd2 5913 req_set_fail(req);
889fca73 5914 __io_req_complete(req, issue_flags, ret, 0);
5262f567
JA
5915 return 0;
5916}
5917
bfe76559 5918static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1 5919{
d625c6ee 5920 switch (req->opcode) {
e781573e 5921 case IORING_OP_NOP:
bfe76559 5922 return 0;
f67676d1
JA
5923 case IORING_OP_READV:
5924 case IORING_OP_READ_FIXED:
3a6820f2 5925 case IORING_OP_READ:
bfe76559 5926 return io_read_prep(req, sqe);
f67676d1
JA
5927 case IORING_OP_WRITEV:
5928 case IORING_OP_WRITE_FIXED:
3a6820f2 5929 case IORING_OP_WRITE:
bfe76559 5930 return io_write_prep(req, sqe);
0969e783 5931 case IORING_OP_POLL_ADD:
bfe76559 5932 return io_poll_add_prep(req, sqe);
0969e783 5933 case IORING_OP_POLL_REMOVE:
c5de0036 5934 return io_poll_update_prep(req, sqe);
8ed8d3c3 5935 case IORING_OP_FSYNC:
1155c76a 5936 return io_fsync_prep(req, sqe);
8ed8d3c3 5937 case IORING_OP_SYNC_FILE_RANGE:
1155c76a 5938 return io_sfr_prep(req, sqe);
03b1230c 5939 case IORING_OP_SENDMSG:
fddaface 5940 case IORING_OP_SEND:
bfe76559 5941 return io_sendmsg_prep(req, sqe);
03b1230c 5942 case IORING_OP_RECVMSG:
fddaface 5943 case IORING_OP_RECV:
bfe76559 5944 return io_recvmsg_prep(req, sqe);
f499a021 5945 case IORING_OP_CONNECT:
bfe76559 5946 return io_connect_prep(req, sqe);
2d28390a 5947 case IORING_OP_TIMEOUT:
bfe76559 5948 return io_timeout_prep(req, sqe, false);
b29472ee 5949 case IORING_OP_TIMEOUT_REMOVE:
bfe76559 5950 return io_timeout_remove_prep(req, sqe);
fbf23849 5951 case IORING_OP_ASYNC_CANCEL:
bfe76559 5952 return io_async_cancel_prep(req, sqe);
2d28390a 5953 case IORING_OP_LINK_TIMEOUT:
bfe76559 5954 return io_timeout_prep(req, sqe, true);
8ed8d3c3 5955 case IORING_OP_ACCEPT:
bfe76559 5956 return io_accept_prep(req, sqe);
d63d1b5e 5957 case IORING_OP_FALLOCATE:
bfe76559 5958 return io_fallocate_prep(req, sqe);
15b71abe 5959 case IORING_OP_OPENAT:
bfe76559 5960 return io_openat_prep(req, sqe);
b5dba59e 5961 case IORING_OP_CLOSE:
bfe76559 5962 return io_close_prep(req, sqe);
05f3fb3c 5963 case IORING_OP_FILES_UPDATE:
269bbe5f 5964 return io_rsrc_update_prep(req, sqe);
eddc7ef5 5965 case IORING_OP_STATX:
bfe76559 5966 return io_statx_prep(req, sqe);
4840e418 5967 case IORING_OP_FADVISE:
bfe76559 5968 return io_fadvise_prep(req, sqe);
c1ca757b 5969 case IORING_OP_MADVISE:
bfe76559 5970 return io_madvise_prep(req, sqe);
cebdb986 5971 case IORING_OP_OPENAT2:
bfe76559 5972 return io_openat2_prep(req, sqe);
3e4827b0 5973 case IORING_OP_EPOLL_CTL:
bfe76559 5974 return io_epoll_ctl_prep(req, sqe);
7d67af2c 5975 case IORING_OP_SPLICE:
bfe76559 5976 return io_splice_prep(req, sqe);
ddf0322d 5977 case IORING_OP_PROVIDE_BUFFERS:
bfe76559 5978 return io_provide_buffers_prep(req, sqe);
067524e9 5979 case IORING_OP_REMOVE_BUFFERS:
bfe76559 5980 return io_remove_buffers_prep(req, sqe);
f2a8d5c7 5981 case IORING_OP_TEE:
bfe76559 5982 return io_tee_prep(req, sqe);
36f4fa68
JA
5983 case IORING_OP_SHUTDOWN:
5984 return io_shutdown_prep(req, sqe);
80a261fd
JA
5985 case IORING_OP_RENAMEAT:
5986 return io_renameat_prep(req, sqe);
14a1143b
JA
5987 case IORING_OP_UNLINKAT:
5988 return io_unlinkat_prep(req, sqe);
f67676d1
JA
5989 }
5990
bfe76559
PB
5991 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5992 req->opcode);
bd54b6fe 5993 return -EINVAL;
bfe76559
PB
5994}
5995
93642ef8 5996static int io_req_prep_async(struct io_kiocb *req)
bfe76559 5997{
b7e298d2
PB
5998 if (!io_op_defs[req->opcode].needs_async_setup)
5999 return 0;
6000 if (WARN_ON_ONCE(req->async_data))
6001 return -EFAULT;
6002 if (io_alloc_async_data(req))
6003 return -EAGAIN;
6004
93642ef8
PB
6005 switch (req->opcode) {
6006 case IORING_OP_READV:
93642ef8
PB
6007 return io_rw_prep_async(req, READ);
6008 case IORING_OP_WRITEV:
93642ef8
PB
6009 return io_rw_prep_async(req, WRITE);
6010 case IORING_OP_SENDMSG:
93642ef8
PB
6011 return io_sendmsg_prep_async(req);
6012 case IORING_OP_RECVMSG:
93642ef8
PB
6013 return io_recvmsg_prep_async(req);
6014 case IORING_OP_CONNECT:
6015 return io_connect_prep_async(req);
6016 }
b7e298d2
PB
6017 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
6018 req->opcode);
6019 return -EFAULT;
f67676d1
JA
6020}
6021
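/*
 * Drain/defer sequencing: a request's sequence is the cached SQ head at the
 * time it was picked up, recovered here by walking back over the links that
 * were submitted together with it.
 */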
9cf7c104
PB
6022static u32 io_get_sequence(struct io_kiocb *req)
6023{
a3dbdf54 6024 u32 seq = req->ctx->cached_sq_head;
9cf7c104 6025
a3dbdf54
PB
 6026	/* need the original cached_sq_head, but it was incremented for each req */
6027 io_for_each_link(req, req)
6028 seq--;
6029 return seq;
9cf7c104
PB
6030}
6031
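/*
 * Handle IOSQE_IO_DRAIN: if earlier requests may still be in flight, stash
 * the request (with its async data prepared) on ->defer_list so it only runs
 * once all previously submitted requests have completed. Returns true if the
 * request was consumed here (deferred or failed), false if the caller should
 * issue it now.
 */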
76cc33d7 6032static bool io_drain_req(struct io_kiocb *req)
de0617e4 6033{
3c19966d 6034 struct io_kiocb *pos;
a197f664 6035 struct io_ring_ctx *ctx = req->ctx;
27dc8338 6036 struct io_defer_entry *de;
f67676d1 6037 int ret;
9cf7c104 6038 u32 seq;
de0617e4 6039
3c19966d
PB
6040 /*
6041 * If we need to drain a request in the middle of a link, drain the
6042 * head request and the next request/link after the current link.
6043 * Considering sequential execution of links, IOSQE_IO_DRAIN will be
6044 * maintained for every request of our link.
6045 */
6046 if (ctx->drain_next) {
6047 req->flags |= REQ_F_IO_DRAIN;
6048 ctx->drain_next = false;
6049 }
6050 /* not interested in head, start from the first linked */
6051 io_for_each_link(pos, req->link) {
6052 if (pos->flags & REQ_F_IO_DRAIN) {
6053 ctx->drain_next = true;
6054 req->flags |= REQ_F_IO_DRAIN;
6055 break;
6056 }
6057 }
6058
9d858b21 6059	/* Still need to defer if there is a pending req in the defer list. */
9cf7c104 6060 if (likely(list_empty_careful(&ctx->defer_list) &&
10c66904
PB
6061 !(req->flags & REQ_F_IO_DRAIN))) {
6062 ctx->drain_active = false;
76cc33d7 6063 return false;
10c66904 6064 }
9cf7c104
PB
6065
6066 seq = io_get_sequence(req);
6067 /* Still a chance to pass the sequence check */
6068 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
76cc33d7 6069 return false;
de0617e4 6070
b7e298d2 6071 ret = io_req_prep_async(req);
be7053b7 6072 if (ret)
1b48773f 6073 goto fail;
cbdcb435 6074 io_prep_async_link(req);
27dc8338 6075 de = kmalloc(sizeof(*de), GFP_KERNEL);
76cc33d7 6076 if (!de) {
1b48773f
PB
6077 ret = -ENOMEM;
6078fail:
6079 io_req_complete_failed(req, ret);
76cc33d7
PB
6080 return true;
6081 }
2d28390a 6082
de0617e4 6083 spin_lock_irq(&ctx->completion_lock);
9cf7c104 6084 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
de0617e4 6085 spin_unlock_irq(&ctx->completion_lock);
27dc8338 6086 kfree(de);
ae34817b 6087 io_queue_async_work(req);
76cc33d7 6088 return true;
de0617e4
JA
6089 }
6090
915967f6 6091 trace_io_uring_defer(ctx, req, req->user_data);
27dc8338 6092 de->req = req;
9cf7c104 6093 de->seq = seq;
27dc8338 6094 list_add_tail(&de->list, &ctx->defer_list);
de0617e4 6095 spin_unlock_irq(&ctx->completion_lock);
76cc33d7 6096 return true;
de0617e4
JA
6097}
6098
68fb8979 6099static void io_clean_op(struct io_kiocb *req)
99bc4c38 6100{
0e1b6fe3
PB
6101 if (req->flags & REQ_F_BUFFER_SELECTED) {
6102 switch (req->opcode) {
6103 case IORING_OP_READV:
6104 case IORING_OP_READ_FIXED:
6105 case IORING_OP_READ:
bcda7baa 6106 kfree((void *)(unsigned long)req->rw.addr);
0e1b6fe3
PB
6107 break;
6108 case IORING_OP_RECVMSG:
6109 case IORING_OP_RECV:
bcda7baa 6110 kfree(req->sr_msg.kbuf);
0e1b6fe3
PB
6111 break;
6112 }
99bc4c38
PB
6113 }
6114
0e1b6fe3
PB
6115 if (req->flags & REQ_F_NEED_CLEANUP) {
6116 switch (req->opcode) {
6117 case IORING_OP_READV:
6118 case IORING_OP_READ_FIXED:
6119 case IORING_OP_READ:
6120 case IORING_OP_WRITEV:
6121 case IORING_OP_WRITE_FIXED:
e8c2bc1f
JA
6122 case IORING_OP_WRITE: {
6123 struct io_async_rw *io = req->async_data;
1dacb4df
PB
6124
6125 kfree(io->free_iovec);
0e1b6fe3 6126 break;
e8c2bc1f 6127 }
0e1b6fe3 6128 case IORING_OP_RECVMSG:
e8c2bc1f
JA
6129 case IORING_OP_SENDMSG: {
6130 struct io_async_msghdr *io = req->async_data;
257e84a5
PB
6131
6132 kfree(io->free_iov);
0e1b6fe3 6133 break;
e8c2bc1f 6134 }
0e1b6fe3
PB
6135 case IORING_OP_SPLICE:
6136 case IORING_OP_TEE:
e1d767f0
PB
6137 if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
6138 io_put_file(req->splice.file_in);
0e1b6fe3 6139 break;
f3cd4850
JA
6140 case IORING_OP_OPENAT:
6141 case IORING_OP_OPENAT2:
6142 if (req->open.filename)
6143 putname(req->open.filename);
6144 break;
80a261fd
JA
6145 case IORING_OP_RENAMEAT:
6146 putname(req->rename.oldpath);
6147 putname(req->rename.newpath);
6148 break;
14a1143b
JA
6149 case IORING_OP_UNLINKAT:
6150 putname(req->unlink.filename);
6151 break;
0e1b6fe3 6152 }
99bc4c38 6153 }
75652a30
JA
6154 if ((req->flags & REQ_F_POLLED) && req->apoll) {
6155 kfree(req->apoll->double_poll);
6156 kfree(req->apoll);
6157 req->apoll = NULL;
6158 }
3a0a6902
PB
6159 if (req->flags & REQ_F_INFLIGHT) {
6160 struct io_uring_task *tctx = req->task->io_uring;
6161
6162 atomic_dec(&tctx->inflight_tracked);
3a0a6902 6163 }
c854357b 6164 if (req->flags & REQ_F_CREDS)
b8e64b53 6165 put_cred(req->creds);
c854357b
PB
6166
6167 req->flags &= ~IO_REQ_CLEAN_FLAGS;
99bc4c38
PB
6168}
6169
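/*
 * Central issue path: dispatch on opcode with the request's credentials
 * applied; on IORING_SETUP_IOPOLL rings, file-backed requests are added to
 * the iopoll reaping list after a successful issue.
 */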
889fca73 6170static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1 6171{
a197f664 6172 struct io_ring_ctx *ctx = req->ctx;
5730b27e 6173 const struct cred *creds = NULL;
d625c6ee 6174 int ret;
2b188cc1 6175
b8e64b53 6176 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
c10d1f98 6177 creds = override_creds(req->creds);
5730b27e 6178
d625c6ee 6179 switch (req->opcode) {
2b188cc1 6180 case IORING_OP_NOP:
889fca73 6181 ret = io_nop(req, issue_flags);
2b188cc1
JA
6182 break;
6183 case IORING_OP_READV:
edafccee 6184 case IORING_OP_READ_FIXED:
3a6820f2 6185 case IORING_OP_READ:
889fca73 6186 ret = io_read(req, issue_flags);
edafccee 6187 break;
3529d8c2 6188 case IORING_OP_WRITEV:
edafccee 6189 case IORING_OP_WRITE_FIXED:
3a6820f2 6190 case IORING_OP_WRITE:
889fca73 6191 ret = io_write(req, issue_flags);
2b188cc1 6192 break;
c992fe29 6193 case IORING_OP_FSYNC:
45d189c6 6194 ret = io_fsync(req, issue_flags);
c992fe29 6195 break;
221c5eb2 6196 case IORING_OP_POLL_ADD:
61e98203 6197 ret = io_poll_add(req, issue_flags);
221c5eb2
JA
6198 break;
6199 case IORING_OP_POLL_REMOVE:
c5de0036 6200 ret = io_poll_update(req, issue_flags);
221c5eb2 6201 break;
5d17b4a4 6202 case IORING_OP_SYNC_FILE_RANGE:
45d189c6 6203 ret = io_sync_file_range(req, issue_flags);
5d17b4a4 6204 break;
0fa03c62 6205 case IORING_OP_SENDMSG:
889fca73 6206 ret = io_sendmsg(req, issue_flags);
062d04d7 6207 break;
fddaface 6208 case IORING_OP_SEND:
889fca73 6209 ret = io_send(req, issue_flags);
0fa03c62 6210 break;
aa1fa28f 6211 case IORING_OP_RECVMSG:
889fca73 6212 ret = io_recvmsg(req, issue_flags);
062d04d7 6213 break;
fddaface 6214 case IORING_OP_RECV:
889fca73 6215 ret = io_recv(req, issue_flags);
aa1fa28f 6216 break;
5262f567 6217 case IORING_OP_TIMEOUT:
61e98203 6218 ret = io_timeout(req, issue_flags);
5262f567 6219 break;
11365043 6220 case IORING_OP_TIMEOUT_REMOVE:
61e98203 6221 ret = io_timeout_remove(req, issue_flags);
11365043 6222 break;
17f2fe35 6223 case IORING_OP_ACCEPT:
889fca73 6224 ret = io_accept(req, issue_flags);
17f2fe35 6225 break;
f8e85cf2 6226 case IORING_OP_CONNECT:
889fca73 6227 ret = io_connect(req, issue_flags);
f8e85cf2 6228 break;
62755e35 6229 case IORING_OP_ASYNC_CANCEL:
61e98203 6230 ret = io_async_cancel(req, issue_flags);
62755e35 6231 break;
d63d1b5e 6232 case IORING_OP_FALLOCATE:
45d189c6 6233 ret = io_fallocate(req, issue_flags);
d63d1b5e 6234 break;
15b71abe 6235 case IORING_OP_OPENAT:
45d189c6 6236 ret = io_openat(req, issue_flags);
15b71abe 6237 break;
b5dba59e 6238 case IORING_OP_CLOSE:
889fca73 6239 ret = io_close(req, issue_flags);
b5dba59e 6240 break;
05f3fb3c 6241 case IORING_OP_FILES_UPDATE:
889fca73 6242 ret = io_files_update(req, issue_flags);
05f3fb3c 6243 break;
eddc7ef5 6244 case IORING_OP_STATX:
45d189c6 6245 ret = io_statx(req, issue_flags);
eddc7ef5 6246 break;
4840e418 6247 case IORING_OP_FADVISE:
45d189c6 6248 ret = io_fadvise(req, issue_flags);
4840e418 6249 break;
c1ca757b 6250 case IORING_OP_MADVISE:
45d189c6 6251 ret = io_madvise(req, issue_flags);
c1ca757b 6252 break;
cebdb986 6253 case IORING_OP_OPENAT2:
45d189c6 6254 ret = io_openat2(req, issue_flags);
cebdb986 6255 break;
3e4827b0 6256 case IORING_OP_EPOLL_CTL:
889fca73 6257 ret = io_epoll_ctl(req, issue_flags);
3e4827b0 6258 break;
7d67af2c 6259 case IORING_OP_SPLICE:
45d189c6 6260 ret = io_splice(req, issue_flags);
7d67af2c 6261 break;
ddf0322d 6262 case IORING_OP_PROVIDE_BUFFERS:
889fca73 6263 ret = io_provide_buffers(req, issue_flags);
ddf0322d 6264 break;
067524e9 6265 case IORING_OP_REMOVE_BUFFERS:
889fca73 6266 ret = io_remove_buffers(req, issue_flags);
3e4827b0 6267 break;
f2a8d5c7 6268 case IORING_OP_TEE:
45d189c6 6269 ret = io_tee(req, issue_flags);
f2a8d5c7 6270 break;
36f4fa68 6271 case IORING_OP_SHUTDOWN:
45d189c6 6272 ret = io_shutdown(req, issue_flags);
36f4fa68 6273 break;
80a261fd 6274 case IORING_OP_RENAMEAT:
45d189c6 6275 ret = io_renameat(req, issue_flags);
80a261fd 6276 break;
14a1143b 6277 case IORING_OP_UNLINKAT:
45d189c6 6278 ret = io_unlinkat(req, issue_flags);
14a1143b 6279 break;
2b188cc1
JA
6280 default:
6281 ret = -EINVAL;
6282 break;
6283 }
6284
5730b27e
JA
6285 if (creds)
6286 revert_creds(creds);
def596e9
JA
6287 if (ret)
6288 return ret;
b532576e 6289 /* If the op doesn't have a file, we're not polling for it */
cb3d8972
PB
6290 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
6291 io_iopoll_req_issued(req);
def596e9
JA
6292
6293 return 0;
2b188cc1
JA
6294}
6295
ebc11b6c
PB
6296static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
6297{
6298 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6299
6300 req = io_put_req_find_next(req);
6301 return req ? &req->work : NULL;
6302}
6303
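/*
 * io-wq worker entry point: issue the request synchronously (without
 * IO_URING_F_NONBLOCK), retrying on -EAGAIN for polled IO, and fail it from
 * task context if the work was cancelled or the issue ultimately failed.
 */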
5280f7e5 6304static void io_wq_submit_work(struct io_wq_work *work)
2b188cc1
JA
6305{
6306 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6df1db6b 6307 struct io_kiocb *timeout;
561fb04a 6308 int ret = 0;
2b188cc1 6309
6df1db6b
PB
6310 timeout = io_prep_linked_timeout(req);
6311 if (timeout)
6312 io_queue_linked_timeout(timeout);
d4c81f38 6313
4014d943 6314 if (work->flags & IO_WQ_WORK_CANCEL)
561fb04a 6315 ret = -ECANCELED;
31b51510 6316
561fb04a 6317 if (!ret) {
561fb04a 6318 do {
889fca73 6319 ret = io_issue_sqe(req, 0);
561fb04a
JA
6320 /*
6321 * We can get EAGAIN for polled IO even though we're
6322 * forcing a sync submission from here, since we can't
6323 * wait for request slots on the block side.
6324 */
6325 if (ret != -EAGAIN)
6326 break;
6327 cond_resched();
6328 } while (1);
6329 }
31b51510 6330
a3df7698 6331 /* avoid locking problems by failing it from a clean context */
561fb04a 6332 if (ret) {
a3df7698 6333 /* io-wq is going to take one down */
de9b4cca 6334 req_ref_get(req);
a3df7698 6335 io_req_task_queue_fail(req, ret);
edafccee 6336 }
2b188cc1
JA
6337}
6338
aeca241b 6339static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
042b0d85 6340 unsigned i)
65e19f54 6341{
042b0d85 6342 return &table->files[i];
dafecf19
PB
6343}
6344
65e19f54
JA
6345static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6346 int index)
6347{
aeca241b 6348 struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
65e19f54 6349
a04b0ac0 6350 return (struct file *) (slot->file_ptr & FFS_MASK);
65e19f54
JA
6351}
6352
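/*
 * Fixed-file table entries pack per-file flags (FFS_ASYNC_READ/WRITE,
 * FFS_ISREG) into the low bits of the file pointer; they deliberately line
 * up with the corresponding REQ_F_* bits (shifted by REQ_F_NOWAIT_READ_BIT)
 * so lookup can fold them straight into req->flags.
 */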
a04b0ac0 6353static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
9a321c98
PB
6354{
6355 unsigned long file_ptr = (unsigned long) file;
6356
b191e2df 6357 if (__io_file_supports_nowait(file, READ))
9a321c98 6358 file_ptr |= FFS_ASYNC_READ;
b191e2df 6359 if (__io_file_supports_nowait(file, WRITE))
9a321c98
PB
6360 file_ptr |= FFS_ASYNC_WRITE;
6361 if (S_ISREG(file_inode(file)->i_mode))
6362 file_ptr |= FFS_ISREG;
a04b0ac0 6363 file_slot->file_ptr = file_ptr;
65e19f54
JA
6364}
6365
ac177053
PB
6366static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
6367 struct io_kiocb *req, int fd)
09bb8394 6368{
8da11c19 6369 struct file *file;
ac177053 6370 unsigned long file_ptr;
09bb8394 6371
ac177053
PB
6372 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
6373 return NULL;
6374 fd = array_index_nospec(fd, ctx->nr_user_files);
6375 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
6376 file = (struct file *) (file_ptr & FFS_MASK);
6377 file_ptr &= ~FFS_MASK;
6378 /* mask in overlapping REQ_F and FFS bits */
b191e2df 6379 req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT);
ac177053
PB
6380 io_req_set_rsrc_node(req);
6381 return file;
6382}
d44f554e 6383
ac177053
PB
6384static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
6385 struct io_submit_state *state,
6386 struct io_kiocb *req, int fd)
6387{
6388 struct file *file = __io_file_get(state, fd);
6389
6390 trace_io_uring_file_get(ctx, fd);
09bb8394 6391
ac177053
PB
6392 /* we don't allow fixed io_uring files */
6393 if (file && unlikely(file->f_op == &io_uring_fops))
6394 io_req_track_inflight(req);
8371adf5 6395 return file;
09bb8394
JA
6396}
6397
ac177053
PB
6398static inline struct file *io_file_get(struct io_ring_ctx *ctx,
6399 struct io_submit_state *state,
6400 struct io_kiocb *req, int fd, bool fixed)
6401{
6402 if (fixed)
6403 return io_file_get_fixed(ctx, req, fd);
6404 else
6405 return io_file_get_normal(ctx, state, req, fd);
6406}
6407
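/*
 * Linked timeouts: io_prep_linked_timeout() hooks an IORING_OP_LINK_TIMEOUT
 * request up to the request before it in the chain. If the timer fires
 * first, this callback cancels that parent request with -ETIME; if the
 * parent completes first, the timer is disarmed instead.
 */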
2665abfd 6408static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2b188cc1 6409{
ad8a48ac
JA
6410 struct io_timeout_data *data = container_of(timer,
6411 struct io_timeout_data, timer);
90cd7e42 6412 struct io_kiocb *prev, *req = data->req;
2665abfd 6413 struct io_ring_ctx *ctx = req->ctx;
2665abfd 6414 unsigned long flags;
2665abfd
JA
6415
6416 spin_lock_irqsave(&ctx->completion_lock, flags);
90cd7e42
PB
6417 prev = req->timeout.head;
6418 req->timeout.head = NULL;
2665abfd
JA
6419
6420 /*
6421 * We don't expect the list to be empty, that will only happen if we
6422 * race with the completion of the linked work.
6423 */
447c19f3 6424 if (prev) {
f2f87370 6425 io_remove_next_linked(prev);
447c19f3
PB
6426 if (!req_ref_inc_not_zero(prev))
6427 prev = NULL;
6428 }
2665abfd
JA
6429 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6430
6431 if (prev) {
014db007 6432 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
9ae1f8dd 6433 io_put_req_deferred(prev, 1);
a298232e 6434 io_put_req_deferred(req, 1);
47f46768 6435 } else {
9ae1f8dd 6436 io_req_complete_post(req, -ETIME, 0);
2665abfd 6437 }
2665abfd
JA
6438 return HRTIMER_NORESTART;
6439}
6440
de968c18 6441static void io_queue_linked_timeout(struct io_kiocb *req)
2665abfd 6442{
de968c18
PB
6443 struct io_ring_ctx *ctx = req->ctx;
6444
6445 spin_lock_irq(&ctx->completion_lock);
76a46e06 6446 /*
f2f87370
PB
6447 * If the back reference is NULL, then our linked request finished
6448 * before we got a chance to setup the timer
76a46e06 6449 */
90cd7e42 6450 if (req->timeout.head) {
e8c2bc1f 6451 struct io_timeout_data *data = req->async_data;
94ae5e77 6452
ad8a48ac
JA
6453 data->timer.function = io_link_timeout_fn;
6454 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6455 data->mode);
2665abfd 6456 }
76a46e06 6457 spin_unlock_irq(&ctx->completion_lock);
2665abfd 6458 /* drop submission reference */
76a46e06
JA
6459 io_put_req(req);
6460}
2665abfd 6461
ad8a48ac 6462static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
2665abfd 6463{
f2f87370 6464 struct io_kiocb *nxt = req->link;
2665abfd 6465
f2f87370
PB
6466 if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
6467 nxt->opcode != IORING_OP_LINK_TIMEOUT)
76a46e06 6468 return NULL;
2665abfd 6469
90cd7e42 6470 nxt->timeout.head = req;
900fad45 6471 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
76a46e06 6472 req->flags |= REQ_F_LINK_TIMEOUT;
76a46e06 6473 return nxt;
2665abfd
JA
6474}
6475
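/*
 * Issue a request inline with IO_URING_F_NONBLOCK: inline completions are
 * batched in the submit-state completion cache, -EAGAIN either arms the
 * internal poll handler or falls back to io-wq, and any linked timeout is
 * started afterwards.
 */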
c5eef2b9 6476static void __io_queue_sqe(struct io_kiocb *req)
282cdc86 6477 __must_hold(&req->ctx->uring_lock)
2b188cc1 6478{
d3d7298d 6479 struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
e0c5c576 6480 int ret;
2b188cc1 6481
59b735ae 6482issue_sqe:
c5eef2b9 6483 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
193155c8 6484
491381ce
JA
6485 /*
6486 * We async punt it if the file wasn't marked NOWAIT, or if the file
6487 * doesn't support non-blocking read/write attempts
6488 */
1840038e 6489 if (likely(!ret)) {
0d63c148 6490 /* drop submission reference */
e342c807 6491 if (req->flags & REQ_F_COMPLETE_INLINE) {
c5eef2b9
PB
6492 struct io_ring_ctx *ctx = req->ctx;
6493 struct io_comp_state *cs = &ctx->submit_state.comp;
e65ef56d 6494
6dd0be1e 6495 cs->reqs[cs->nr++] = req;
d3d7298d 6496 if (cs->nr == ARRAY_SIZE(cs->reqs))
2a2758f2 6497 io_submit_flush_completions(ctx);
9affd664 6498 } else {
d3d7298d 6499 io_put_req(req);
0d63c148 6500 }
1840038e 6501 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
59b735ae
OL
6502 switch (io_arm_poll_handler(req)) {
6503 case IO_APOLL_READY:
6504 goto issue_sqe;
6505 case IO_APOLL_ABORTED:
1840038e
PB
6506 /*
6507 * Queued up for async execution, worker will release
6508 * submit reference when the iocb is actually submitted.
6509 */
6510 io_queue_async_work(req);
59b735ae 6511 break;
1840038e 6512 }
0d63c148 6513 } else {
f41db273 6514 io_req_complete_failed(req, ret);
9e645e11 6515 }
d3d7298d
PB
6516 if (linked_timeout)
6517 io_queue_linked_timeout(linked_timeout);
2b188cc1
JA
6518}
6519
441b8a78 6520static inline void io_queue_sqe(struct io_kiocb *req)
282cdc86 6521 __must_hold(&req->ctx->uring_lock)
4fe2c963 6522{
10c66904 6523 if (unlikely(req->ctx->drain_active) && io_drain_req(req))
76cc33d7 6524 return;
4fe2c963 6525
76cc33d7 6526 if (likely(!(req->flags & REQ_F_FORCE_ASYNC))) {
c5eef2b9 6527 __io_queue_sqe(req);
76cc33d7
PB
6528 } else {
6529 int ret = io_req_prep_async(req);
6530
6531 if (unlikely(ret))
6532 io_req_complete_failed(req, ret);
6533 else
6534 io_queue_async_work(req);
ce35a47a 6535 }
4fe2c963
JL
6536}
6537
b16fed66
PB
6538/*
6539 * Check SQE restrictions (opcode and flags).
6540 *
6541 * Returns 'true' if SQE is allowed, 'false' otherwise.
6542 */
6543static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6544 struct io_kiocb *req,
6545 unsigned int sqe_flags)
4fe2c963 6546{
4cfb25bf 6547 if (likely(!ctx->restricted))
b16fed66
PB
6548 return true;
6549
6550 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6551 return false;
6552
6553 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6554 ctx->restrictions.sqe_flags_required)
6555 return false;
6556
6557 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6558 ctx->restrictions.sqe_flags_required))
6559 return false;
6560
6561 return true;
4fe2c963
JL
6562}
6563
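/*
 * First-pass initialisation of a request from its SQE: copy over opcode,
 * flags and user_data, enforce flag/opcode validity and ring restrictions,
 * resolve personality credentials, and look up the target file (fixed or
 * normal) if the opcode needs one.
 */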
b16fed66
PB
6564static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
6565 const struct io_uring_sqe *sqe)
282cdc86 6566 __must_hold(&ctx->uring_lock)
b16fed66
PB
6567{
6568 struct io_submit_state *state;
6569 unsigned int sqe_flags;
003e8dcc 6570 int personality, ret = 0;
b16fed66 6571
864ea921 6572 /* req is partially pre-initialised, see io_preinit_req() */
b16fed66
PB
6573 req->opcode = READ_ONCE(sqe->opcode);
6574 /* same numerical values with corresponding REQ_F_*, safe to copy */
6575 req->flags = sqe_flags = READ_ONCE(sqe->flags);
6576 req->user_data = READ_ONCE(sqe->user_data);
b16fed66 6577 req->file = NULL;
b16fed66
PB
6578 req->fixed_rsrc_refs = NULL;
6579 /* one is dropped after submission, the other at completion */
abc54d63 6580 atomic_set(&req->refs, 2);
b16fed66 6581 req->task = current;
b16fed66
PB
6582
6583 /* enforce forwards compatibility on users */
dddca226 6584 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
b16fed66 6585 return -EINVAL;
b16fed66
PB
6586 if (unlikely(req->opcode >= IORING_OP_LAST))
6587 return -EINVAL;
4cfb25bf 6588 if (!io_check_restriction(ctx, req, sqe_flags))
b16fed66
PB
6589 return -EACCES;
6590
6591 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6592 !io_op_defs[req->opcode].buffer_select)
6593 return -EOPNOTSUPP;
3c19966d
PB
6594 if (unlikely(sqe_flags & IOSQE_IO_DRAIN))
6595 ctx->drain_active = true;
863e0560 6596
003e8dcc
JA
6597 personality = READ_ONCE(sqe->personality);
6598 if (personality) {
c10d1f98
PB
6599 req->creds = xa_load(&ctx->personalities, personality);
6600 if (!req->creds)
003e8dcc 6601 return -EINVAL;
c10d1f98 6602 get_cred(req->creds);
b8e64b53 6603 req->flags |= REQ_F_CREDS;
003e8dcc 6604 }
b16fed66
PB
6605 state = &ctx->submit_state;
6606
6607 /*
6608 * Plug now if we have more than 1 IO left after this, and the target
6609 * is potentially a read/write to block based storage.
6610 */
6611 if (!state->plug_started && state->ios_left > 1 &&
6612 io_op_defs[req->opcode].plug) {
6613 blk_start_plug(&state->plug);
6614 state->plug_started = true;
6615 }
6616
6617 if (io_op_defs[req->opcode].needs_file) {
ac177053
PB
6618 req->file = io_file_get(ctx, state, req, READ_ONCE(sqe->fd),
6619 (sqe_flags & IOSQE_FIXED_FILE));
b16fed66
PB
6620 if (unlikely(!req->file))
6621 ret = -EBADF;
6622 }
6623
6624 state->ios_left--;
6625 return ret;
6626}
6627
a6b8cadc 6628static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
a1ab7b35 6629 const struct io_uring_sqe *sqe)
282cdc86 6630 __must_hold(&ctx->uring_lock)
9e645e11 6631{
a1ab7b35 6632 struct io_submit_link *link = &ctx->submit_state.link;
ef4ff581 6633 int ret;
9e645e11 6634
a6b8cadc
PB
6635 ret = io_init_req(ctx, req, sqe);
6636 if (unlikely(ret)) {
6637fail_req:
de59bc10
PB
6638 if (link->head) {
6639 /* fail even hard links since we don't submit */
93d2bcd2 6640 req_set_fail(link->head);
f41db273 6641 io_req_complete_failed(link->head, -ECANCELED);
de59bc10
PB
6642 link->head = NULL;
6643 }
f41db273 6644 io_req_complete_failed(req, ret);
a6b8cadc
PB
6645 return ret;
6646 }
441b8a78 6647
be7053b7
PB
6648 ret = io_req_prep(req, sqe);
6649 if (unlikely(ret))
6650 goto fail_req;
a6b8cadc 6651
be7053b7 6652 /* don't need @sqe from now on */
236daeae
OL
6653 trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
6654 req->flags, true,
6655 ctx->flags & IORING_SETUP_SQPOLL);
a6b8cadc 6656
9e645e11
JA
6657 /*
6658 * If we already have a head request, queue this one for async
6659 * submittal once the head completes. If we don't have a head but
6660 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6661 * submitted sync once the chain is complete. If none of those
6662 * conditions are true (normal request), then just queue it.
6663 */
863e0560
PB
6664 if (link->head) {
6665 struct io_kiocb *head = link->head;
4e88d6e7 6666
b7e298d2 6667 ret = io_req_prep_async(req);
cf109604 6668 if (unlikely(ret))
a6b8cadc 6669 goto fail_req;
9d76377f 6670 trace_io_uring_link(ctx, req, head);
f2f87370 6671 link->last->link = req;
863e0560 6672 link->last = req;
32fe525b
PB
6673
6674 /* last request of a link, enqueue the link */
ef4ff581 6675 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
863e0560 6676 link->head = NULL;
5e159204 6677 io_queue_sqe(head);
32fe525b 6678 }
9e645e11 6679 } else {
ef4ff581 6680 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
863e0560
PB
6681 link->head = req;
6682 link->last = req;
711be031 6683 } else {
be7053b7 6684 io_queue_sqe(req);
711be031 6685 }
9e645e11 6686 }
2e6e1fde 6687
1d4240cc 6688 return 0;
9e645e11
JA
6689}
6690
9a56a232
JA
6691/*
6692 * Batched submission is done, ensure local IO is flushed out.
6693 */
ba88ff11
PB
6694static void io_submit_state_end(struct io_submit_state *state,
6695 struct io_ring_ctx *ctx)
9a56a232 6696{
a1ab7b35 6697 if (state->link.head)
de59bc10 6698 io_queue_sqe(state->link.head);
6dd0be1e 6699 if (state->comp.nr)
2a2758f2 6700 io_submit_flush_completions(ctx);
27926b68
JA
6701 if (state->plug_started)
6702 blk_finish_plug(&state->plug);
9f13c35b 6703 io_state_file_put(state);
9a56a232
JA
6704}
6705
6706/*
6707 * Start submission side cache.
6708 */
6709static void io_submit_state_start(struct io_submit_state *state,
ba88ff11 6710 unsigned int max_ios)
9a56a232 6711{
27926b68 6712 state->plug_started = false;
9a56a232 6713 state->ios_left = max_ios;
a1ab7b35
PB
6714 /* set only head, no need to init link_last in advance */
6715 state->link.head = NULL;
9a56a232
JA
6716}
6717
2b188cc1
JA
6718static void io_commit_sqring(struct io_ring_ctx *ctx)
6719{
75b28aff 6720 struct io_rings *rings = ctx->rings;
2b188cc1 6721
caf582c6
PB
6722 /*
6723 * Ensure any loads from the SQEs are done at this point,
6724 * since once we write the new head, the application could
6725 * write new data to them.
6726 */
6727 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
6728}
6729
2b188cc1 6730/*
dd9ae8a0 6731 * Fetch an sqe, if one is available. Note this returns a pointer to memory
2b188cc1
JA
6732 * that is mapped by userspace. This means that care needs to be taken to
6733 * ensure that reads are stable, as we cannot rely on userspace always
6734 * being a good citizen. If members of the sqe are validated and then later
6735 * used, it's important that those reads are done through READ_ONCE() to
6736 * prevent a re-load down the line.
6737 */
709b302f 6738static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2b188cc1 6739{
ea5ab3b5 6740 unsigned head, mask = ctx->sq_entries - 1;
17d3aeb3 6741 unsigned sq_idx = ctx->cached_sq_head++ & mask;
2b188cc1
JA
6742
6743 /*
6744 * The cached sq head (or cq tail) serves two purposes:
6745 *
6746	 * 1) allows us to batch the cost of updating the user visible
6747	 *    head.
6748 * 2) allows the kernel side to track the head on its own, even
6749 * though the application is the one updating it.
6750 */
17d3aeb3 6751 head = READ_ONCE(ctx->sq_array[sq_idx]);
709b302f
PB
6752 if (likely(head < ctx->sq_entries))
6753 return &ctx->sq_sqes[head];
2b188cc1
JA
6754
6755 /* drop invalid entries */
15641e42
PB
6756 ctx->cq_extra--;
6757 WRITE_ONCE(ctx->rings->sq_dropped,
6758 READ_ONCE(ctx->rings->sq_dropped) + 1);
709b302f
PB
6759 return NULL;
6760}
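/*
 * A minimal sketch of the read-once rule described above, using a
 * hypothetical shared field @foo (not a real sqe member): load it once
 * into a local, validate the local, and only ever use the local copy.
 *
 *	u32 idx = READ_ONCE(shared->foo);
 *	if (idx >= limit)
 *		return -EINVAL;
 *	use(idx);	// never re-read shared->foo here
 */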
6761
0f212204 6762static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
282cdc86 6763 __must_hold(&ctx->uring_lock)
6c271ce2 6764{
09899b19 6765 struct io_uring_task *tctx;
46c4e16a 6766 int submitted = 0;
6c271ce2 6767
ee7d46d9
PB
6768 /* make sure SQ entry isn't read before tail */
6769 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
2b85edfc
PB
6770 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6771 return -EAGAIN;
6c271ce2 6772
09899b19
PB
6773 tctx = current->io_uring;
6774 tctx->cached_refs -= nr;
6775 if (unlikely(tctx->cached_refs < 0)) {
6776 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
6777
6778 percpu_counter_add(&tctx->inflight, refill);
6779 refcount_add(refill, &current->usage);
6780 tctx->cached_refs += refill;
6781 }
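	/*
	 * Worked example of the refill above (IO_TCTX_REFS_CACHE_NR kept
	 * symbolic): with cached_refs == 8 and nr == 32, cached_refs drops
	 * to -24, so refill = 24 + IO_TCTX_REFS_CACHE_NR; inflight and the
	 * task usage count are bumped by refill, and cached_refs ends up
	 * back at IO_TCTX_REFS_CACHE_NR.
	 */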
ba88ff11 6782 io_submit_state_start(&ctx->submit_state, nr);
b14cca0c 6783
46c4e16a 6784 while (submitted < nr) {
3529d8c2 6785 const struct io_uring_sqe *sqe;
196be95c 6786 struct io_kiocb *req;
fb5ccc98 6787
258b29a9 6788 req = io_alloc_req(ctx);
196be95c
PB
6789 if (unlikely(!req)) {
6790 if (!submitted)
6791 submitted = -EAGAIN;
fb5ccc98 6792 break;
196be95c 6793 }
4fccfcbb
PB
6794 sqe = io_get_sqe(ctx);
6795 if (unlikely(!sqe)) {
6796 kmem_cache_free(req_cachep, req);
6797 break;
6798 }
d3656344
JA
6799 /* will complete beyond this point, count as submitted */
6800 submitted++;
a1ab7b35 6801 if (io_submit_sqe(ctx, req, sqe))
196be95c 6802 break;
6c271ce2
JA
6803 }
6804
9466f437
PB
6805 if (unlikely(submitted != nr)) {
6806 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
d8a6df10 6807 int unused = nr - ref_used;
9466f437 6808
09899b19 6809 current->io_uring->cached_refs += unused;
d8a6df10 6810 percpu_ref_put_many(&ctx->refs, unused);
9466f437 6811 }
6c271ce2 6812
a1ab7b35 6813 io_submit_state_end(&ctx->submit_state, ctx);
ae9428ca
PB
6814 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6815 io_commit_sqring(ctx);
6816
6c271ce2
JA
6817 return submitted;
6818}
6819
e4b6d902
PB
6820static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
6821{
6822 return READ_ONCE(sqd->state);
6823}
6824
23b3628e
XW
6825static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6826{
6827 /* Tell userspace we may need a wakeup call */
6828 spin_lock_irq(&ctx->completion_lock);
20c0b380
NA
6829 WRITE_ONCE(ctx->rings->sq_flags,
6830 ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
23b3628e
XW
6831 spin_unlock_irq(&ctx->completion_lock);
6832}
6833
6834static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6835{
6836 spin_lock_irq(&ctx->completion_lock);
20c0b380
NA
6837 WRITE_ONCE(ctx->rings->sq_flags,
6838 ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
23b3628e
XW
6839 spin_unlock_irq(&ctx->completion_lock);
6840}
6841
08369246 6842static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
6c271ce2 6843{
c8d1ba58 6844 unsigned int to_submit;
bdcd3eab 6845 int ret = 0;
6c271ce2 6846
c8d1ba58 6847 to_submit = io_sqring_entries(ctx);
e95eee2d 6848 /* if we're handling multiple rings, cap submit size for fairness */
4ce8ad95
OL
6849 if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
6850 to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
e95eee2d 6851
906a3c6f 6852 if (!list_empty(&ctx->iopoll_list) || to_submit) {
c8d1ba58 6853 unsigned nr_events = 0;
948e1947
PB
6854 const struct cred *creds = NULL;
6855
6856 if (ctx->sq_creds != current_cred())
6857 creds = override_creds(ctx->sq_creds);
a4c0b3de 6858
c8d1ba58 6859 mutex_lock(&ctx->uring_lock);
906a3c6f 6860 if (!list_empty(&ctx->iopoll_list))
3c30ef0f 6861 io_do_iopoll(ctx, &nr_events, 0, true);
906a3c6f 6862
3b763ba1
PB
6863 /*
 6864	 * Don't submit if refs are dying: that matters for io_uring_register(),
 6865	 * and io_ring_exit_work() relies on it as well.
6866 */
0298ef96
PB
6867 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
6868 !(ctx->flags & IORING_SETUP_R_DISABLED))
08369246 6869 ret = io_submit_sqes(ctx, to_submit);
c8d1ba58 6870 mutex_unlock(&ctx->uring_lock);
6c271ce2 6871
acfb381d
PB
6872 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
6873 wake_up(&ctx->sqo_sq_wait);
948e1947
PB
6874 if (creds)
6875 revert_creds(creds);
acfb381d 6876 }
6c271ce2 6877
08369246
XW
6878 return ret;
6879}
6c271ce2 6880
08369246
XW
6881static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
6882{
6883 struct io_ring_ctx *ctx;
6884 unsigned sq_thread_idle = 0;
6c271ce2 6885
c9dca27d
PB
6886 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6887 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
08369246 6888 sqd->sq_thread_idle = sq_thread_idle;
c8d1ba58 6889}
6c271ce2 6890
e4b6d902
PB
6891static bool io_sqd_handle_event(struct io_sq_data *sqd)
6892{
6893 bool did_sig = false;
6894 struct ksignal ksig;
6895
6896 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
6897 signal_pending(current)) {
6898 mutex_unlock(&sqd->lock);
6899 if (signal_pending(current))
6900 did_sig = get_signal(&ksig);
6901 cond_resched();
6902 mutex_lock(&sqd->lock);
6903 }
e4b6d902
PB
6904 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
6905}
6906
c8d1ba58
JA
6907static int io_sq_thread(void *data)
6908{
69fb2131
JA
6909 struct io_sq_data *sqd = data;
6910 struct io_ring_ctx *ctx;
a0d9205f 6911 unsigned long timeout = 0;
37d1e2e3 6912 char buf[TASK_COMM_LEN];
08369246 6913 DEFINE_WAIT(wait);
6c271ce2 6914
696ee88a 6915 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
37d1e2e3 6916 set_task_comm(current, buf);
37d1e2e3
JA
6917
6918 if (sqd->sq_cpu != -1)
6919 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
6920 else
6921 set_cpus_allowed_ptr(current, cpu_online_mask);
6922 current->flags |= PF_NO_SETAFFINITY;
6923
09a6f4ef 6924 mutex_lock(&sqd->lock);
e4b6d902 6925 while (1) {
1a924a80 6926 bool cap_entries, sqt_spin = false;
c1edbf5f 6927
e4b6d902
PB
6928 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
6929 if (io_sqd_handle_event(sqd))
c7d95613 6930 break;
08369246
XW
6931 timeout = jiffies + sqd->sq_thread_idle;
6932 }
e4b6d902 6933
e95eee2d 6934 cap_entries = !list_is_singular(&sqd->ctx_list);
69fb2131 6935 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
948e1947 6936 int ret = __io_sq_thread(ctx, cap_entries);
7c30f36a 6937
08369246
XW
6938 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
6939 sqt_spin = true;
69fb2131 6940 }
dd432ea5
PB
6941 if (io_run_task_work())
6942 sqt_spin = true;
6c271ce2 6943
08369246 6944 if (sqt_spin || !time_after(jiffies, timeout)) {
c8d1ba58 6945 cond_resched();
08369246
XW
6946 if (sqt_spin)
6947 timeout = jiffies + sqd->sq_thread_idle;
6948 continue;
6949 }
6950
08369246 6951 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
dd432ea5 6952 if (!io_sqd_events_pending(sqd) && !current->task_works) {
1a924a80
PB
6953 bool needs_sched = true;
6954
724cb4f9 6955 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
aaa9f0f4
PB
6956 io_ring_set_wakeup_flag(ctx);
6957
724cb4f9
HX
6958 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6959 !list_empty_careful(&ctx->iopoll_list)) {
6960 needs_sched = false;
6961 break;
6962 }
6963 if (io_sqring_entries(ctx)) {
6964 needs_sched = false;
6965 break;
6966 }
6967 }
6968
6969 if (needs_sched) {
6970 mutex_unlock(&sqd->lock);
6971 schedule();
6972 mutex_lock(&sqd->lock);
6973 }
69fb2131
JA
6974 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6975 io_ring_clear_wakeup_flag(ctx);
6c271ce2 6976 }
08369246
XW
6977
6978 finish_wait(&sqd->wait, &wait);
6979 timeout = jiffies + sqd->sq_thread_idle;
6c271ce2 6980 }
28cea78a 6981
78cc687b 6982 io_uring_cancel_generic(true, sqd);
37d1e2e3 6983 sqd->thread = NULL;
05962f95 6984 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
5f3f26f9 6985 io_ring_set_wakeup_flag(ctx);
521d6a73 6986 io_run_task_work();
734551df
PB
6987 mutex_unlock(&sqd->lock);
6988
37d1e2e3
JA
6989 complete(&sqd->exited);
6990 do_exit(0);
6c271ce2
JA
6991}
6992
bda52162
JA
6993struct io_wait_queue {
6994 struct wait_queue_entry wq;
6995 struct io_ring_ctx *ctx;
5fd46178 6996 unsigned cq_tail;
bda52162
JA
6997 unsigned nr_timeouts;
6998};
6999
6c503150 7000static inline bool io_should_wake(struct io_wait_queue *iowq)
bda52162
JA
7001{
7002 struct io_ring_ctx *ctx = iowq->ctx;
5fd46178 7003 int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;
bda52162
JA
7004
7005 /*
d195a66e 7006 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
7007 * started waiting. For timeouts, we always want to return to userspace,
7008 * regardless of event count.
7009 */
5fd46178 7010 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
bda52162
JA
7011}
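/*
 * The signed distance above is wrap-safe: with 32-bit counters, e.g.
 * cached_cq_tail == 2 and cq_tail == 0xfffffffe still gives dist == 4,
 * so "enough events" is detected correctly across a wrap. The same
 * idiom in isolation (a sketch, not from this file):
 *
 *	int dist = (int)(tail - wanted_tail);
 *	bool enough = dist >= 0;
 */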
7012
7013static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
7014 int wake_flags, void *key)
7015{
7016 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
7017 wq);
7018
6c503150
PB
7019 /*
 7020	 * We cannot safely flush overflowed CQEs from here, so just ensure we
 7021	 * wake up the task; the next invocation will do the flush.
7022 */
5ed7a37d 7023 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
6c503150
PB
7024 return autoremove_wake_function(curr, mode, wake_flags, key);
7025 return -1;
bda52162
JA
7026}
7027
af9c1a44
JA
7028static int io_run_task_work_sig(void)
7029{
7030 if (io_run_task_work())
7031 return 1;
7032 if (!signal_pending(current))
7033 return 0;
0b8cfa97 7034 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
792ee0f6 7035 return -ERESTARTSYS;
af9c1a44
JA
7036 return -EINTR;
7037}
7038
eeb60b9a
PB
7039 /* when this returns > 0, the caller should retry */
7040static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
7041 struct io_wait_queue *iowq,
7042 signed long *timeout)
7043{
7044 int ret;
7045
7046 /* make sure we run task_work before checking for signals */
7047 ret = io_run_task_work_sig();
7048 if (ret || io_should_wake(iowq))
7049 return ret;
7050 /* let the caller flush overflows, retry */
5ed7a37d 7051 if (test_bit(0, &ctx->check_cq_overflow))
eeb60b9a
PB
7052 return 1;
7053
7054 *timeout = schedule_timeout(*timeout);
7055 return !*timeout ? -ETIME : 1;
7056}
7057
2b188cc1
JA
7058/*
7059 * Wait until events become available, if we don't already have some. The
7060 * application must reap them itself, as they reside on the shared cq ring.
7061 */
7062static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
c73ebb68
HX
7063 const sigset_t __user *sig, size_t sigsz,
7064 struct __kernel_timespec __user *uts)
2b188cc1 7065{
90291099 7066 struct io_wait_queue iowq;
75b28aff 7067 struct io_rings *rings = ctx->rings;
c1d5a224
PB
7068 signed long timeout = MAX_SCHEDULE_TIMEOUT;
7069 int ret;
2b188cc1 7070
b41e9852 7071 do {
6c2450ae 7072 io_cqring_overflow_flush(ctx, false);
6c503150 7073 if (io_cqring_events(ctx) >= min_events)
b41e9852 7074 return 0;
4c6e277c 7075 if (!io_run_task_work())
b41e9852 7076 break;
b41e9852 7077 } while (1);
2b188cc1
JA
7078
7079 if (sig) {
9e75ad5d
AB
7080#ifdef CONFIG_COMPAT
7081 if (in_compat_syscall())
7082 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 7083 sigsz);
9e75ad5d
AB
7084 else
7085#endif
b772434b 7086 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 7087
2b188cc1
JA
7088 if (ret)
7089 return ret;
7090 }
7091
c73ebb68 7092 if (uts) {
c1d5a224
PB
7093 struct timespec64 ts;
7094
c73ebb68
HX
7095 if (get_timespec64(&ts, uts))
7096 return -EFAULT;
7097 timeout = timespec64_to_jiffies(&ts);
7098 }
7099
90291099
PB
7100 init_waitqueue_func_entry(&iowq.wq, io_wake_function);
7101 iowq.wq.private = current;
7102 INIT_LIST_HEAD(&iowq.wq.entry);
7103 iowq.ctx = ctx;
bda52162 7104 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
5fd46178 7105 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
90291099 7106
c826bd7a 7107 trace_io_uring_cqring_wait(ctx, min_events);
bda52162 7108 do {
ca0a2651 7109 /* if we can't even flush overflow, don't wait for more */
6c2450ae 7110 if (!io_cqring_overflow_flush(ctx, false)) {
ca0a2651
JA
7111 ret = -EBUSY;
7112 break;
7113 }
311997b3 7114 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
bda52162 7115 TASK_INTERRUPTIBLE);
eeb60b9a 7116 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
311997b3 7117 finish_wait(&ctx->cq_wait, &iowq.wq);
ca0a2651 7118 cond_resched();
eeb60b9a 7119 } while (ret > 0);
bda52162 7120
b7db41c9 7121 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 7122
75b28aff 7123 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
7124}
7125
9123c8ff 7126static void io_free_page_table(void **table, size_t size)
05f3fb3c 7127{
9123c8ff 7128 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
05f3fb3c 7129
846a4ef2 7130 for (i = 0; i < nr_tables; i++)
9123c8ff
PB
7131 kfree(table[i]);
7132 kfree(table);
7133}
7134
7135static void **io_alloc_page_table(size_t size)
7136{
7137 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
7138 size_t init_size = size;
7139 void **table;
7140
7141 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL);
7142 if (!table)
7143 return NULL;
7144
7145 for (i = 0; i < nr_tables; i++) {
27f6b318 7146 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
9123c8ff
PB
7147
7148 table[i] = kzalloc(this_size, GFP_KERNEL);
7149 if (!table[i]) {
7150 io_free_page_table(table, init_size);
7151 return NULL;
7152 }
7153 size -= this_size;
7154 }
7155 return table;
05f3fb3c
JA
7156}
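/*
 * Sizing sketch for the two-level table above (example numbers, not
 * taken from the source): with 4K pages and 8-byte entries,
 * size = 100000 * 8 = 800000 bytes gives
 * nr_tables = DIV_ROUND_UP(800000, 4096) = 196 chunks; the first 195
 * are PAGE_SIZE bytes each and the last one 800000 - 195 * 4096 = 1280.
 */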
7157
28a9fe25 7158static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
1642b445 7159{
28a9fe25
PB
7160 percpu_ref_exit(&ref_node->refs);
7161 kfree(ref_node);
1642b445
PB
7162}
7163
a7f0ed5a
PB
7164static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
7165 struct io_rsrc_data *data_to_kill)
6b06314c 7166{
a7f0ed5a
PB
7167 WARN_ON_ONCE(!ctx->rsrc_backup_node);
7168 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
6b06314c 7169
a7f0ed5a
PB
7170 if (data_to_kill) {
7171 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
82fbcfa9 7172
a7f0ed5a 7173 rsrc_node->rsrc_data = data_to_kill;
4956b9ea 7174 spin_lock_irq(&ctx->rsrc_ref_lock);
a7f0ed5a 7175 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
4956b9ea 7176 spin_unlock_irq(&ctx->rsrc_ref_lock);
82fbcfa9 7177
3e942498 7178 atomic_inc(&data_to_kill->refs);
a7f0ed5a
PB
7179 percpu_ref_kill(&rsrc_node->refs);
7180 ctx->rsrc_node = NULL;
7181 }
6b06314c 7182
a7f0ed5a
PB
7183 if (!ctx->rsrc_node) {
7184 ctx->rsrc_node = ctx->rsrc_backup_node;
7185 ctx->rsrc_backup_node = NULL;
7186 }
8bad28d8
HX
7187}
7188
a7f0ed5a 7189static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
8dd03afe
PB
7190{
7191 if (ctx->rsrc_backup_node)
7192 return 0;
b895c9a6 7193 ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
8dd03afe 7194 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
8bad28d8
HX
7195}
7196
40ae0ff7 7197static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx)
8bad28d8
HX
7198{
7199 int ret;
05589553 7200
215c3902 7201 /* As we may drop ->uring_lock, other task may have started quiesce */
8bad28d8
HX
7202 if (data->quiesce)
7203 return -ENXIO;
05589553 7204
8bad28d8 7205 data->quiesce = true;
1ffc5422 7206 do {
a7f0ed5a 7207 ret = io_rsrc_node_switch_start(ctx);
8dd03afe 7208 if (ret)
f2303b1f 7209 break;
a7f0ed5a 7210 io_rsrc_node_switch(ctx, data);
f2303b1f 7211
3e942498
PB
7212 /* kill initial ref, already quiesced if zero */
7213 if (atomic_dec_and_test(&data->refs))
7214 break;
c018db4a 7215 mutex_unlock(&ctx->uring_lock);
8bad28d8 7216 flush_delayed_work(&ctx->rsrc_put_work);
1ffc5422 7217 ret = wait_for_completion_interruptible(&data->done);
c018db4a
JA
7218 if (!ret) {
7219 mutex_lock(&ctx->uring_lock);
1ffc5422 7220 break;
c018db4a 7221 }
8bad28d8 7222
3e942498
PB
7223 atomic_inc(&data->refs);
 7224		/* wait for all work items that might complete data->done */
7225 flush_delayed_work(&ctx->rsrc_put_work);
cb5e1b81 7226 reinit_completion(&data->done);
8dd03afe 7227
1ffc5422 7228 ret = io_run_task_work_sig();
8bad28d8 7229 mutex_lock(&ctx->uring_lock);
f2303b1f 7230 } while (ret >= 0);
8bad28d8 7231 data->quiesce = false;
05f3fb3c 7232
8bad28d8 7233 return ret;
d7954b2b
BM
7234}
7235
2d091d62
PB
7236static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
7237{
7238 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
7239 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
7240
7241 return &data->tags[table_idx][off];
7242}
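/*
 * Indexing sketch for the helper above, assuming 4K pages so that each
 * tag table page holds PAGE_SIZE / sizeof(u64) = 512 tags (i.e. a shift
 * of 9 and a mask of 511): idx == 1000 resolves to table_idx == 1 and
 * off == 488, i.e. data->tags[1][488].
 */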
7243
44b31f2f 7244static void io_rsrc_data_free(struct io_rsrc_data *data)
1ad555c6 7245{
2d091d62
PB
7246 size_t size = data->nr * sizeof(data->tags[0][0]);
7247
7248 if (data->tags)
7249 io_free_page_table((void **)data->tags, size);
44b31f2f
PB
7250 kfree(data);
7251}
7252
d878c816
PB
7253static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
7254 u64 __user *utags, unsigned nr,
7255 struct io_rsrc_data **pdata)
1ad555c6 7256{
b895c9a6 7257 struct io_rsrc_data *data;
2d091d62 7258 int ret = -ENOMEM;
d878c816 7259 unsigned i;
1ad555c6
BM
7260
7261 data = kzalloc(sizeof(*data), GFP_KERNEL);
7262 if (!data)
d878c816 7263 return -ENOMEM;
2d091d62 7264 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
b60c8dce 7265 if (!data->tags) {
1ad555c6 7266 kfree(data);
d878c816
PB
7267 return -ENOMEM;
7268 }
2d091d62
PB
7269
7270 data->nr = nr;
7271 data->ctx = ctx;
7272 data->do_put = do_put;
d878c816 7273 if (utags) {
2d091d62 7274 ret = -EFAULT;
d878c816 7275 for (i = 0; i < nr; i++) {
fdd1dc31
CIK
7276 u64 *tag_slot = io_get_tag_slot(data, i);
7277
7278 if (copy_from_user(tag_slot, &utags[i],
7279 sizeof(*tag_slot)))
2d091d62 7280 goto fail;
d878c816 7281 }
1ad555c6 7282 }
b60c8dce 7283
3e942498 7284 atomic_set(&data->refs, 1);
1ad555c6 7285 init_completion(&data->done);
d878c816
PB
7286 *pdata = data;
7287 return 0;
2d091d62
PB
7288fail:
7289 io_rsrc_data_free(data);
7290 return ret;
1ad555c6
BM
7291}
7292
9123c8ff
PB
7293static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
7294{
042b0d85 7295 table->files = kvcalloc(nr_files, sizeof(table->files[0]), GFP_KERNEL);
9123c8ff
PB
7296 return !!table->files;
7297}
7298
042b0d85 7299static void io_free_file_tables(struct io_file_table *table)
9123c8ff 7300{
042b0d85 7301 kvfree(table->files);
9123c8ff
PB
7302 table->files = NULL;
7303}
7304
fff4db76 7305static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
1ad555c6 7306{
fff4db76
PB
7307#if defined(CONFIG_UNIX)
7308 if (ctx->ring_sock) {
7309 struct sock *sock = ctx->ring_sock->sk;
7310 struct sk_buff *skb;
7311
7312 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7313 kfree_skb(skb);
7314 }
7315#else
7316 int i;
7317
7318 for (i = 0; i < ctx->nr_user_files; i++) {
7319 struct file *file;
7320
7321 file = io_file_from_index(ctx, i);
7322 if (file)
7323 fput(file);
7324 }
7325#endif
042b0d85 7326 io_free_file_tables(&ctx->file_table);
44b31f2f 7327 io_rsrc_data_free(ctx->file_data);
fff4db76
PB
7328 ctx->file_data = NULL;
7329 ctx->nr_user_files = 0;
1ad555c6
BM
7330}
7331
d7954b2b
BM
7332static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7333{
d7954b2b
BM
7334 int ret;
7335
08480400 7336 if (!ctx->file_data)
d7954b2b 7337 return -ENXIO;
08480400
PB
7338 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
7339 if (!ret)
7340 __io_sqe_files_unregister(ctx);
7341 return ret;
6b06314c
JA
7342}
7343
37d1e2e3 7344static void io_sq_thread_unpark(struct io_sq_data *sqd)
09a6f4ef 7345 __releases(&sqd->lock)
37d1e2e3 7346{
521d6a73
PB
7347 WARN_ON_ONCE(sqd->thread == current);
7348
9e138a48
PB
7349 /*
 7350	 * Do the dance, but don't use a conditional clear_bit(), as that would
 7351	 * race with other threads incrementing park_pending and setting the bit.
7352 */
37d1e2e3 7353 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
9e138a48
PB
7354 if (atomic_dec_return(&sqd->park_pending))
7355 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
09a6f4ef 7356 mutex_unlock(&sqd->lock);
37d1e2e3
JA
7357}
7358
86e0d676 7359static void io_sq_thread_park(struct io_sq_data *sqd)
09a6f4ef 7360 __acquires(&sqd->lock)
37d1e2e3 7361{
521d6a73
PB
7362 WARN_ON_ONCE(sqd->thread == current);
7363
9e138a48 7364 atomic_inc(&sqd->park_pending);
86e0d676 7365 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
09a6f4ef 7366 mutex_lock(&sqd->lock);
05962f95 7367 if (sqd->thread)
86e0d676 7368 wake_up_process(sqd->thread);
37d1e2e3
JA
7369}
7370
7371static void io_sq_thread_stop(struct io_sq_data *sqd)
7372{
521d6a73 7373 WARN_ON_ONCE(sqd->thread == current);
88885f66 7374 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
521d6a73 7375
05962f95 7376 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
88885f66 7377 mutex_lock(&sqd->lock);
e8f98f24
JA
7378 if (sqd->thread)
7379 wake_up_process(sqd->thread);
09a6f4ef 7380 mutex_unlock(&sqd->lock);
05962f95 7381 wait_for_completion(&sqd->exited);
37d1e2e3
JA
7382}
7383
534ca6d6 7384static void io_put_sq_data(struct io_sq_data *sqd)
6c271ce2 7385{
534ca6d6 7386 if (refcount_dec_and_test(&sqd->refs)) {
9e138a48
PB
7387 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
7388
37d1e2e3
JA
7389 io_sq_thread_stop(sqd);
7390 kfree(sqd);
7391 }
7392}
7393
7394static void io_sq_thread_finish(struct io_ring_ctx *ctx)
7395{
7396 struct io_sq_data *sqd = ctx->sq_data;
7397
7398 if (sqd) {
05962f95 7399 io_sq_thread_park(sqd);
521d6a73 7400 list_del_init(&ctx->sqd_list);
37d1e2e3 7401 io_sqd_update_thread_idle(sqd);
05962f95 7402 io_sq_thread_unpark(sqd);
37d1e2e3
JA
7403
7404 io_put_sq_data(sqd);
7405 ctx->sq_data = NULL;
534ca6d6
JA
7406 }
7407}
7408
aa06165d
JA
7409static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7410{
7411 struct io_ring_ctx *ctx_attach;
7412 struct io_sq_data *sqd;
7413 struct fd f;
7414
7415 f = fdget(p->wq_fd);
7416 if (!f.file)
7417 return ERR_PTR(-ENXIO);
7418 if (f.file->f_op != &io_uring_fops) {
7419 fdput(f);
7420 return ERR_PTR(-EINVAL);
7421 }
7422
7423 ctx_attach = f.file->private_data;
7424 sqd = ctx_attach->sq_data;
7425 if (!sqd) {
7426 fdput(f);
7427 return ERR_PTR(-EINVAL);
7428 }
5c2469e0
JA
7429 if (sqd->task_tgid != current->tgid) {
7430 fdput(f);
7431 return ERR_PTR(-EPERM);
7432 }
aa06165d
JA
7433
7434 refcount_inc(&sqd->refs);
7435 fdput(f);
7436 return sqd;
7437}
7438
26984fbf
PB
7439static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
7440 bool *attached)
534ca6d6
JA
7441{
7442 struct io_sq_data *sqd;
7443
26984fbf 7444 *attached = false;
5c2469e0
JA
7445 if (p->flags & IORING_SETUP_ATTACH_WQ) {
7446 sqd = io_attach_sq_data(p);
26984fbf
PB
7447 if (!IS_ERR(sqd)) {
7448 *attached = true;
5c2469e0 7449 return sqd;
26984fbf 7450 }
5c2469e0
JA
 7451		/* fall through for the EPERM case, set up a new sqd/task */
7452 if (PTR_ERR(sqd) != -EPERM)
7453 return sqd;
7454 }
aa06165d 7455
534ca6d6
JA
7456 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7457 if (!sqd)
7458 return ERR_PTR(-ENOMEM);
7459
9e138a48 7460 atomic_set(&sqd->park_pending, 0);
534ca6d6 7461 refcount_set(&sqd->refs, 1);
69fb2131 7462 INIT_LIST_HEAD(&sqd->ctx_list);
09a6f4ef 7463 mutex_init(&sqd->lock);
534ca6d6 7464 init_waitqueue_head(&sqd->wait);
37d1e2e3 7465 init_completion(&sqd->exited);
534ca6d6
JA
7466 return sqd;
7467}
7468
6b06314c 7469#if defined(CONFIG_UNIX)
6b06314c
JA
7470/*
7471 * Ensure the UNIX gc is aware of our file set, so we are certain that
 7472 * the io_uring can be safely unregistered on process exit, even if there
 7473 * are reference loops among the registered files.
7474 */
7475static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7476{
7477 struct sock *sk = ctx->ring_sock->sk;
7478 struct scm_fp_list *fpl;
7479 struct sk_buff *skb;
08a45173 7480 int i, nr_files;
6b06314c 7481
6b06314c
JA
7482 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7483 if (!fpl)
7484 return -ENOMEM;
7485
7486 skb = alloc_skb(0, GFP_KERNEL);
7487 if (!skb) {
7488 kfree(fpl);
7489 return -ENOMEM;
7490 }
7491
7492 skb->sk = sk;
6b06314c 7493
08a45173 7494 nr_files = 0;
62e398be 7495 fpl->user = get_uid(current_user());
6b06314c 7496 for (i = 0; i < nr; i++) {
65e19f54
JA
7497 struct file *file = io_file_from_index(ctx, i + offset);
7498
7499 if (!file)
08a45173 7500 continue;
65e19f54 7501 fpl->fp[nr_files] = get_file(file);
08a45173
JA
7502 unix_inflight(fpl->user, fpl->fp[nr_files]);
7503 nr_files++;
6b06314c
JA
7504 }
7505
08a45173
JA
7506 if (nr_files) {
7507 fpl->max = SCM_MAX_FD;
7508 fpl->count = nr_files;
7509 UNIXCB(skb).fp = fpl;
05f3fb3c 7510 skb->destructor = unix_destruct_scm;
08a45173
JA
7511 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7512 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 7513
08a45173
JA
7514 for (i = 0; i < nr_files; i++)
7515 fput(fpl->fp[i]);
7516 } else {
7517 kfree_skb(skb);
7518 kfree(fpl);
7519 }
6b06314c
JA
7520
7521 return 0;
7522}
7523
7524/*
7525 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7526 * causes regular reference counting to break down. We rely on the UNIX
7527 * garbage collection to take care of this problem for us.
7528 */
7529static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7530{
7531 unsigned left, total;
7532 int ret = 0;
7533
7534 total = 0;
7535 left = ctx->nr_user_files;
7536 while (left) {
7537 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
7538
7539 ret = __io_sqe_files_scm(ctx, this_files, total);
7540 if (ret)
7541 break;
7542 left -= this_files;
7543 total += this_files;
7544 }
7545
7546 if (!ret)
7547 return 0;
7548
7549 while (total < ctx->nr_user_files) {
65e19f54
JA
7550 struct file *file = io_file_from_index(ctx, total);
7551
7552 if (file)
7553 fput(file);
6b06314c
JA
7554 total++;
7555 }
7556
7557 return ret;
7558}
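/*
 * Batching sketch for the loop above: assuming SCM_MAX_FD is 253 (the
 * usual value, not something this file defines), e.g. 600 registered
 * files are flushed to the UNIX gc in three skbs of 253, 253 and 94 fds.
 */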
7559#else
7560static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7561{
7562 return 0;
7563}
7564#endif
7565
47e90392 7566static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
05f3fb3c 7567{
50238531 7568 struct file *file = prsrc->file;
05f3fb3c
JA
7569#if defined(CONFIG_UNIX)
7570 struct sock *sock = ctx->ring_sock->sk;
7571 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7572 struct sk_buff *skb;
7573 int i;
7574
7575 __skb_queue_head_init(&list);
7576
7577 /*
7578 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7579 * remove this entry and rearrange the file array.
7580 */
7581 skb = skb_dequeue(head);
7582 while (skb) {
7583 struct scm_fp_list *fp;
7584
7585 fp = UNIXCB(skb).fp;
7586 for (i = 0; i < fp->count; i++) {
7587 int left;
7588
7589 if (fp->fp[i] != file)
7590 continue;
7591
7592 unix_notinflight(fp->user, fp->fp[i]);
7593 left = fp->count - 1 - i;
7594 if (left) {
7595 memmove(&fp->fp[i], &fp->fp[i + 1],
7596 left * sizeof(struct file *));
7597 }
7598 fp->count--;
7599 if (!fp->count) {
7600 kfree_skb(skb);
7601 skb = NULL;
7602 } else {
7603 __skb_queue_tail(&list, skb);
7604 }
7605 fput(file);
7606 file = NULL;
7607 break;
7608 }
7609
7610 if (!file)
7611 break;
7612
7613 __skb_queue_tail(&list, skb);
7614
7615 skb = skb_dequeue(head);
7616 }
7617
7618 if (skb_peek(&list)) {
7619 spin_lock_irq(&head->lock);
7620 while ((skb = __skb_dequeue(&list)) != NULL)
7621 __skb_queue_tail(head, skb);
7622 spin_unlock_irq(&head->lock);
7623 }
7624#else
7625 fput(file);
7626#endif
7627}
7628
b895c9a6 7629static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
65e19f54 7630{
b895c9a6 7631 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
269bbe5f
BM
7632 struct io_ring_ctx *ctx = rsrc_data->ctx;
7633 struct io_rsrc_put *prsrc, *tmp;
05589553 7634
269bbe5f
BM
7635 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
7636 list_del(&prsrc->list);
b60c8dce
PB
7637
7638 if (prsrc->tag) {
7639 bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
b60c8dce
PB
7640
7641 io_ring_submit_lock(ctx, lock_ring);
157d257f 7642 spin_lock_irq(&ctx->completion_lock);
b60c8dce 7643 io_cqring_fill_event(ctx, prsrc->tag, 0, 0);
2840f710 7644 ctx->cq_extra++;
b60c8dce 7645 io_commit_cqring(ctx);
157d257f 7646 spin_unlock_irq(&ctx->completion_lock);
b60c8dce
PB
7647 io_cqring_ev_posted(ctx);
7648 io_ring_submit_unlock(ctx, lock_ring);
7649 }
7650
40ae0ff7 7651 rsrc_data->do_put(ctx, prsrc);
269bbe5f 7652 kfree(prsrc);
65e19f54 7653 }
05589553 7654
28a9fe25 7655 io_rsrc_node_destroy(ref_node);
3e942498
PB
7656 if (atomic_dec_and_test(&rsrc_data->refs))
7657 complete(&rsrc_data->done);
2faf852d 7658}
65e19f54 7659
269bbe5f 7660static void io_rsrc_put_work(struct work_struct *work)
4a38aed2
JA
7661{
7662 struct io_ring_ctx *ctx;
7663 struct llist_node *node;
7664
269bbe5f
BM
7665 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
7666 node = llist_del_all(&ctx->rsrc_put_llist);
4a38aed2
JA
7667
7668 while (node) {
b895c9a6 7669 struct io_rsrc_node *ref_node;
4a38aed2
JA
7670 struct llist_node *next = node->next;
7671
b895c9a6 7672 ref_node = llist_entry(node, struct io_rsrc_node, llist);
269bbe5f 7673 __io_rsrc_put_work(ref_node);
4a38aed2
JA
7674 node = next;
7675 }
7676}
7677
00835dce 7678static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
2faf852d 7679{
b895c9a6 7680 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
3e942498 7681 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
4956b9ea 7682 unsigned long flags;
e297822b 7683 bool first_add = false;
e297822b 7684
4956b9ea 7685 spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
b895c9a6 7686 node->done = true;
e297822b 7687
d67d2263 7688 while (!list_empty(&ctx->rsrc_ref_list)) {
b895c9a6
PB
7689 node = list_first_entry(&ctx->rsrc_ref_list,
7690 struct io_rsrc_node, node);
e297822b 7691 /* recycle ref nodes in order */
b895c9a6 7692 if (!node->done)
e297822b 7693 break;
b895c9a6
PB
7694 list_del(&node->node);
7695 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
e297822b 7696 }
4956b9ea 7697 spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
05589553 7698
3e942498
PB
7699 if (first_add)
7700 mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
05f3fb3c 7701}
65e19f54 7702
b895c9a6 7703static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
05f3fb3c 7704{
b895c9a6 7705 struct io_rsrc_node *ref_node;
05f3fb3c 7706
05589553
XW
7707 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7708 if (!ref_node)
3e2224c5 7709 return NULL;
05f3fb3c 7710
00835dce 7711 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
05589553
XW
7712 0, GFP_KERNEL)) {
7713 kfree(ref_node);
3e2224c5 7714 return NULL;
05589553
XW
7715 }
7716 INIT_LIST_HEAD(&ref_node->node);
269bbe5f 7717 INIT_LIST_HEAD(&ref_node->rsrc_list);
e297822b 7718 ref_node->done = false;
05589553 7719 return ref_node;
05589553
XW
7720}
7721
6b06314c 7722static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
792e3582 7723 unsigned nr_args, u64 __user *tags)
6b06314c
JA
7724{
7725 __s32 __user *fds = (__s32 __user *) arg;
05f3fb3c 7726 struct file *file;
f3baed39 7727 int fd, ret;
846a4ef2 7728 unsigned i;
6b06314c 7729
05f3fb3c 7730 if (ctx->file_data)
6b06314c
JA
7731 return -EBUSY;
7732 if (!nr_args)
7733 return -EINVAL;
7734 if (nr_args > IORING_MAX_FIXED_FILES)
7735 return -EMFILE;
a7f0ed5a 7736 ret = io_rsrc_node_switch_start(ctx);
f3baed39
PB
7737 if (ret)
7738 return ret;
d878c816
PB
7739 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
7740 &ctx->file_data);
7741 if (ret)
7742 return ret;
6b06314c 7743
f3baed39 7744 ret = -ENOMEM;
aeca241b 7745 if (!io_alloc_file_tables(&ctx->file_table, nr_args))
1ad555c6 7746 goto out_free;
65e19f54 7747
08a45173 7748 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
d878c816 7749 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
600cf3f8
PB
7750 ret = -EFAULT;
7751 goto out_fput;
7752 }
08a45173 7753 /* allow sparse sets */
792e3582
PB
7754 if (fd == -1) {
7755 ret = -EINVAL;
2d091d62 7756 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
792e3582 7757 goto out_fput;
08a45173 7758 continue;
792e3582 7759 }
6b06314c 7760
05f3fb3c 7761 file = fget(fd);
6b06314c 7762 ret = -EBADF;
792e3582 7763 if (unlikely(!file))
600cf3f8 7764 goto out_fput;
05f3fb3c 7765
6b06314c
JA
7766 /*
7767 * Don't allow io_uring instances to be registered. If UNIX
7768 * isn't enabled, then this causes a reference cycle and this
7769 * instance can never get freed. If UNIX is enabled we'll
7770 * handle it just fine, but there's still no point in allowing
7771 * a ring fd as it doesn't support regular read/write anyway.
7772 */
05f3fb3c
JA
7773 if (file->f_op == &io_uring_fops) {
7774 fput(file);
600cf3f8 7775 goto out_fput;
6b06314c 7776 }
aeca241b 7777 io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
6b06314c
JA
7778 }
7779
6b06314c 7780 ret = io_sqe_files_scm(ctx);
05589553 7781 if (ret) {
08480400 7782 __io_sqe_files_unregister(ctx);
05589553
XW
7783 return ret;
7784 }
6b06314c 7785
a7f0ed5a 7786 io_rsrc_node_switch(ctx, NULL);
6b06314c 7787 return ret;
600cf3f8
PB
7788out_fput:
7789 for (i = 0; i < ctx->nr_user_files; i++) {
7790 file = io_file_from_index(ctx, i);
7791 if (file)
7792 fput(file);
7793 }
042b0d85 7794 io_free_file_tables(&ctx->file_table);
600cf3f8 7795 ctx->nr_user_files = 0;
600cf3f8 7796out_free:
44b31f2f 7797 io_rsrc_data_free(ctx->file_data);
55cbc256 7798 ctx->file_data = NULL;
6b06314c
JA
7799 return ret;
7800}
7801
c3a31e60
JA
7802static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7803 int index)
7804{
7805#if defined(CONFIG_UNIX)
7806 struct sock *sock = ctx->ring_sock->sk;
7807 struct sk_buff_head *head = &sock->sk_receive_queue;
7808 struct sk_buff *skb;
7809
7810 /*
7811 * See if we can merge this file into an existing skb SCM_RIGHTS
7812 * file set. If there's no room, fall back to allocating a new skb
7813 * and filling it in.
7814 */
7815 spin_lock_irq(&head->lock);
7816 skb = skb_peek(head);
7817 if (skb) {
7818 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7819
7820 if (fpl->count < SCM_MAX_FD) {
7821 __skb_unlink(skb, head);
7822 spin_unlock_irq(&head->lock);
7823 fpl->fp[fpl->count] = get_file(file);
7824 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7825 fpl->count++;
7826 spin_lock_irq(&head->lock);
7827 __skb_queue_head(head, skb);
7828 } else {
7829 skb = NULL;
7830 }
7831 }
7832 spin_unlock_irq(&head->lock);
7833
7834 if (skb) {
7835 fput(file);
7836 return 0;
7837 }
7838
7839 return __io_sqe_files_scm(ctx, 1, index);
7840#else
7841 return 0;
7842#endif
7843}
7844
b60c8dce 7845static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
e7c78371 7846 struct io_rsrc_node *node, void *rsrc)
05f3fb3c 7847{
269bbe5f 7848 struct io_rsrc_put *prsrc;
05f3fb3c 7849
269bbe5f
BM
7850 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
7851 if (!prsrc)
a5318d3c 7852 return -ENOMEM;
05f3fb3c 7853
2d091d62 7854 prsrc->tag = *io_get_tag_slot(data, idx);
50238531 7855 prsrc->rsrc = rsrc;
e7c78371 7856 list_add(&prsrc->list, &node->rsrc_list);
a5318d3c 7857 return 0;
05f3fb3c
JA
7858}
7859
7860static int __io_sqe_files_update(struct io_ring_ctx *ctx,
c3bdad02 7861 struct io_uring_rsrc_update2 *up,
05f3fb3c
JA
7862 unsigned nr_args)
7863{
c3bdad02 7864 u64 __user *tags = u64_to_user_ptr(up->tags);
98f0b3b4 7865 __s32 __user *fds = u64_to_user_ptr(up->data);
b895c9a6 7866 struct io_rsrc_data *data = ctx->file_data;
a04b0ac0
PB
7867 struct io_fixed_file *file_slot;
7868 struct file *file;
98f0b3b4
PB
7869 int fd, i, err = 0;
7870 unsigned int done;
05589553 7871 bool needs_switch = false;
c3a31e60 7872
98f0b3b4
PB
7873 if (!ctx->file_data)
7874 return -ENXIO;
7875 if (up->offset + nr_args > ctx->nr_user_files)
c3a31e60
JA
7876 return -EINVAL;
7877
67973b93 7878 for (done = 0; done < nr_args; done++) {
c3bdad02
PB
7879 u64 tag = 0;
7880
7881 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
7882 copy_from_user(&fd, &fds[done], sizeof(fd))) {
c3a31e60
JA
7883 err = -EFAULT;
7884 break;
7885 }
c3bdad02
PB
7886 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
7887 err = -EINVAL;
7888 break;
7889 }
4e0377a1 7890 if (fd == IORING_REGISTER_FILES_SKIP)
7891 continue;
7892
67973b93 7893 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
aeca241b 7894 file_slot = io_fixed_file_slot(&ctx->file_table, i);
ea64ec02 7895
a04b0ac0
PB
7896 if (file_slot->file_ptr) {
7897 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
b60c8dce
PB
7898 err = io_queue_rsrc_removal(data, up->offset + done,
7899 ctx->rsrc_node, file);
a5318d3c
HD
7900 if (err)
7901 break;
a04b0ac0 7902 file_slot->file_ptr = 0;
05589553 7903 needs_switch = true;
c3a31e60
JA
7904 }
7905 if (fd != -1) {
c3a31e60
JA
7906 file = fget(fd);
7907 if (!file) {
7908 err = -EBADF;
7909 break;
7910 }
7911 /*
7912 * Don't allow io_uring instances to be registered. If
7913 * UNIX isn't enabled, then this causes a reference
7914 * cycle and this instance can never get freed. If UNIX
7915 * is enabled we'll handle it just fine, but there's
7916 * still no point in allowing a ring fd as it doesn't
7917 * support regular read/write anyway.
7918 */
7919 if (file->f_op == &io_uring_fops) {
7920 fput(file);
7921 err = -EBADF;
7922 break;
7923 }
2d091d62 7924 *io_get_tag_slot(data, up->offset + done) = tag;
9a321c98 7925 io_fixed_file_set(file_slot, file);
c3a31e60 7926 err = io_sqe_file_register(ctx, file, i);
f3bd9dae 7927 if (err) {
a04b0ac0 7928 file_slot->file_ptr = 0;
f3bd9dae 7929 fput(file);
c3a31e60 7930 break;
f3bd9dae 7931 }
c3a31e60 7932 }
05f3fb3c
JA
7933 }
7934
a7f0ed5a
PB
7935 if (needs_switch)
7936 io_rsrc_node_switch(ctx, data);
c3a31e60
JA
7937 return done ? done : err;
7938}
05589553 7939
685fe7fe
JA
7940static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
7941 struct task_struct *task)
24369c2e 7942{
e941894e 7943 struct io_wq_hash *hash;
24369c2e 7944 struct io_wq_data data;
24369c2e 7945 unsigned int concurrency;
24369c2e 7946
362a9e65 7947 mutex_lock(&ctx->uring_lock);
e941894e
JA
7948 hash = ctx->hash_map;
7949 if (!hash) {
7950 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
362a9e65
YY
7951 if (!hash) {
7952 mutex_unlock(&ctx->uring_lock);
e941894e 7953 return ERR_PTR(-ENOMEM);
362a9e65 7954 }
e941894e
JA
7955 refcount_set(&hash->refs, 1);
7956 init_waitqueue_head(&hash->wait);
7957 ctx->hash_map = hash;
24369c2e 7958 }
362a9e65 7959 mutex_unlock(&ctx->uring_lock);
24369c2e 7960
e941894e 7961 data.hash = hash;
685fe7fe 7962 data.task = task;
ebc11b6c 7963 data.free_work = io_wq_free_work;
f5fa38c5 7964 data.do_work = io_wq_submit_work;
24369c2e 7965
d25e3a3d
JA
 7966	/* Use QD, or 4 * CPUS, whichever is smaller */
7967 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
24369c2e 7968
5aa75ed5 7969 return io_wq_create(concurrency, &data);
24369c2e
PB
7970}
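/*
 * Sizing example for the min() above (assumed numbers): a ring created
 * with 4096 SQ entries on a machine with 8 online CPUs caps the io-wq
 * concurrency at min(4096, 4 * 8) = 32.
 */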
7971
5aa75ed5
JA
7972static int io_uring_alloc_task_context(struct task_struct *task,
7973 struct io_ring_ctx *ctx)
0f212204
JA
7974{
7975 struct io_uring_task *tctx;
d8a6df10 7976 int ret;
0f212204 7977
09899b19 7978 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
0f212204
JA
7979 if (unlikely(!tctx))
7980 return -ENOMEM;
7981
d8a6df10
JA
7982 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7983 if (unlikely(ret)) {
7984 kfree(tctx);
7985 return ret;
7986 }
7987
685fe7fe 7988 tctx->io_wq = io_init_wq_offload(ctx, task);
5aa75ed5
JA
7989 if (IS_ERR(tctx->io_wq)) {
7990 ret = PTR_ERR(tctx->io_wq);
7991 percpu_counter_destroy(&tctx->inflight);
7992 kfree(tctx);
7993 return ret;
7994 }
7995
0f212204
JA
7996 xa_init(&tctx->xa);
7997 init_waitqueue_head(&tctx->wait);
fdaf083c 7998 atomic_set(&tctx->in_idle, 0);
b303fe2e 7999 atomic_set(&tctx->inflight_tracked, 0);
0f212204 8000 task->io_uring = tctx;
7cbf1722
JA
8001 spin_lock_init(&tctx->task_lock);
8002 INIT_WQ_LIST(&tctx->task_list);
7cbf1722 8003 init_task_work(&tctx->task_work, tctx_task_work);
0f212204
JA
8004 return 0;
8005}
8006
8007void __io_uring_free(struct task_struct *tsk)
8008{
8009 struct io_uring_task *tctx = tsk->io_uring;
8010
8011 WARN_ON_ONCE(!xa_empty(&tctx->xa));
ef8eaa4e 8012 WARN_ON_ONCE(tctx->io_wq);
09899b19 8013 WARN_ON_ONCE(tctx->cached_refs);
ef8eaa4e 8014
d8a6df10 8015 percpu_counter_destroy(&tctx->inflight);
0f212204
JA
8016 kfree(tctx);
8017 tsk->io_uring = NULL;
8018}
8019
7e84e1c7
SG
8020static int io_sq_offload_create(struct io_ring_ctx *ctx,
8021 struct io_uring_params *p)
2b188cc1
JA
8022{
8023 int ret;
8024
d25e3a3d
JA
8025 /* Retain compatibility with failing for an invalid attach attempt */
8026 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
8027 IORING_SETUP_ATTACH_WQ) {
8028 struct fd f;
8029
8030 f = fdget(p->wq_fd);
8031 if (!f.file)
8032 return -ENXIO;
0cc936f7
JA
8033 if (f.file->f_op != &io_uring_fops) {
8034 fdput(f);
f2a48dd0 8035 return -EINVAL;
0cc936f7
JA
8036 }
8037 fdput(f);
d25e3a3d 8038 }
6c271ce2 8039 if (ctx->flags & IORING_SETUP_SQPOLL) {
46fe18b1 8040 struct task_struct *tsk;
534ca6d6 8041 struct io_sq_data *sqd;
26984fbf 8042 bool attached;
534ca6d6 8043
26984fbf 8044 sqd = io_get_sq_data(p, &attached);
534ca6d6
JA
8045 if (IS_ERR(sqd)) {
8046 ret = PTR_ERR(sqd);
8047 goto err;
8048 }
69fb2131 8049
7c30f36a 8050 ctx->sq_creds = get_current_cred();
534ca6d6 8051 ctx->sq_data = sqd;
917257da
JA
8052 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8053 if (!ctx->sq_thread_idle)
8054 ctx->sq_thread_idle = HZ;
8055
78d7f6ba 8056 io_sq_thread_park(sqd);
de75a3d3
PB
8057 list_add(&ctx->sqd_list, &sqd->ctx_list);
8058 io_sqd_update_thread_idle(sqd);
26984fbf 8059 /* don't attach to a dying SQPOLL thread, would be racy */
f2a48dd0 8060 ret = (attached && !sqd->thread) ? -ENXIO : 0;
78d7f6ba
PB
8061 io_sq_thread_unpark(sqd);
8062
de75a3d3
PB
8063 if (ret < 0)
8064 goto err;
8065 if (attached)
5aa75ed5 8066 return 0;
aa06165d 8067
6c271ce2 8068 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 8069 int cpu = p->sq_thread_cpu;
6c271ce2 8070
917257da 8071 ret = -EINVAL;
f2a48dd0 8072 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
e8f98f24 8073 goto err_sqpoll;
37d1e2e3 8074 sqd->sq_cpu = cpu;
6c271ce2 8075 } else {
37d1e2e3 8076 sqd->sq_cpu = -1;
6c271ce2 8077 }
37d1e2e3
JA
8078
8079 sqd->task_pid = current->pid;
5c2469e0 8080 sqd->task_tgid = current->tgid;
46fe18b1
JA
8081 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
8082 if (IS_ERR(tsk)) {
8083 ret = PTR_ERR(tsk);
e8f98f24 8084 goto err_sqpoll;
6c271ce2 8085 }
97a73a0f 8086
46fe18b1 8087 sqd->thread = tsk;
97a73a0f 8088 ret = io_uring_alloc_task_context(tsk, ctx);
46fe18b1 8089 wake_up_new_task(tsk);
0f212204
JA
8090 if (ret)
8091 goto err;
6c271ce2
JA
8092 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8093 /* Can't have SQ_AFF without SQPOLL */
8094 ret = -EINVAL;
8095 goto err;
8096 }
8097
2b188cc1 8098 return 0;
f2a48dd0
PB
8099err_sqpoll:
8100 complete(&ctx->sq_data->exited);
2b188cc1 8101err:
37d1e2e3 8102 io_sq_thread_finish(ctx);
2b188cc1
JA
8103 return ret;
8104}
8105
a087e2b5
BM
8106static inline void __io_unaccount_mem(struct user_struct *user,
8107 unsigned long nr_pages)
2b188cc1
JA
8108{
8109 atomic_long_sub(nr_pages, &user->locked_vm);
8110}
8111
a087e2b5
BM
8112static inline int __io_account_mem(struct user_struct *user,
8113 unsigned long nr_pages)
2b188cc1
JA
8114{
8115 unsigned long page_limit, cur_pages, new_pages;
8116
8117 /* Don't allow more pages than we can safely lock */
8118 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8119
8120 do {
8121 cur_pages = atomic_long_read(&user->locked_vm);
8122 new_pages = cur_pages + nr_pages;
8123 if (new_pages > page_limit)
8124 return -ENOMEM;
8125 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8126 new_pages) != cur_pages);
8127
8128 return 0;
8129}
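/*
 * The loop above is the usual optimistic-update idiom: read the current
 * value, compute the new one, and retry if another task changed the
 * counter in between. In isolation (a sketch, not from this file):
 *
 *	do {
 *		old = atomic_long_read(&counter);
 *		new = old + delta;
 *		if (new > limit)
 *			return -ENOMEM;
 *	} while (atomic_long_cmpxchg(&counter, old, new) != old);
 */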
8130
26bfa89e 8131static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
a087e2b5 8132{
62e398be 8133 if (ctx->user)
a087e2b5 8134 __io_unaccount_mem(ctx->user, nr_pages);
30975825 8135
26bfa89e
JA
8136 if (ctx->mm_account)
8137 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
a087e2b5
BM
8138}
8139
26bfa89e 8140static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
a087e2b5 8141{
30975825
BM
8142 int ret;
8143
62e398be 8144 if (ctx->user) {
30975825
BM
8145 ret = __io_account_mem(ctx->user, nr_pages);
8146 if (ret)
8147 return ret;
8148 }
8149
26bfa89e
JA
8150 if (ctx->mm_account)
8151 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
a087e2b5
BM
8152
8153 return 0;
8154}
8155
2b188cc1
JA
8156static void io_mem_free(void *ptr)
8157{
52e04ef4
MR
8158 struct page *page;
8159
8160 if (!ptr)
8161 return;
2b188cc1 8162
52e04ef4 8163 page = virt_to_head_page(ptr);
2b188cc1
JA
8164 if (put_page_testzero(page))
8165 free_compound_page(page);
8166}
8167
8168static void *io_mem_alloc(size_t size)
8169{
8170 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
26bfa89e 8171 __GFP_NORETRY | __GFP_ACCOUNT;
2b188cc1
JA
8172
8173 return (void *) __get_free_pages(gfp_flags, get_order(size));
8174}
8175
75b28aff
HV
8176static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8177 size_t *sq_offset)
8178{
8179 struct io_rings *rings;
8180 size_t off, sq_array_size;
8181
8182 off = struct_size(rings, cqes, cq_entries);
8183 if (off == SIZE_MAX)
8184 return SIZE_MAX;
8185
8186#ifdef CONFIG_SMP
8187 off = ALIGN(off, SMP_CACHE_BYTES);
8188 if (off == 0)
8189 return SIZE_MAX;
8190#endif
8191
b36200f5
DV
8192 if (sq_offset)
8193 *sq_offset = off;
8194
75b28aff
HV
8195 sq_array_size = array_size(sizeof(u32), sq_entries);
8196 if (sq_array_size == SIZE_MAX)
8197 return SIZE_MAX;
8198
8199 if (check_add_overflow(off, sq_array_size, &off))
8200 return SIZE_MAX;
8201
75b28aff
HV
8202 return off;
8203}
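/*
 * Layout example for the helper above (the 64-byte header and 64-byte
 * SMP_CACHE_BYTES are assumptions for illustration): with 128 CQEs of
 * 16 bytes each, off = 64 + 128 * 16 = 2112, which is already cache
 * aligned, so *sq_offset = 2112; adding the SQ index array of
 * 128 * 4 = 512 bytes gives a total ring allocation of 2624 bytes.
 */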
8204
41edf1a5 8205static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
7f61a1e9 8206{
41edf1a5 8207 struct io_mapped_ubuf *imu = *slot;
7f61a1e9
PB
8208 unsigned int i;
8209
6224843d
PB
8210 if (imu != ctx->dummy_ubuf) {
8211 for (i = 0; i < imu->nr_bvecs; i++)
8212 unpin_user_page(imu->bvec[i].bv_page);
8213 if (imu->acct_pages)
8214 io_unaccount_mem(ctx, imu->acct_pages);
8215 kvfree(imu);
8216 }
41edf1a5 8217 *slot = NULL;
7f61a1e9
PB
8218}
8219
bd54b6fe 8220static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
edafccee 8221{
634d00df
PB
8222 io_buffer_unmap(ctx, &prsrc->buf);
8223 prsrc->buf = NULL;
bd54b6fe 8224}
edafccee 8225
bd54b6fe
BM
8226static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
8227{
8228 unsigned int i;
edafccee 8229
7f61a1e9
PB
8230 for (i = 0; i < ctx->nr_user_bufs; i++)
8231 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
edafccee 8232 kfree(ctx->user_bufs);
bb6659cc 8233 io_rsrc_data_free(ctx->buf_data);
edafccee 8234 ctx->user_bufs = NULL;
bd54b6fe 8235 ctx->buf_data = NULL;
edafccee 8236 ctx->nr_user_bufs = 0;
bd54b6fe
BM
8237}
8238
0a96bbe4 8239static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
edafccee 8240{
bd54b6fe 8241 int ret;
edafccee 8242
bd54b6fe 8243 if (!ctx->buf_data)
edafccee
JA
8244 return -ENXIO;
8245
bd54b6fe
BM
8246 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
8247 if (!ret)
8248 __io_sqe_buffers_unregister(ctx);
8249 return ret;
edafccee
JA
8250}
8251
8252static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8253 void __user *arg, unsigned index)
8254{
8255 struct iovec __user *src;
8256
8257#ifdef CONFIG_COMPAT
8258 if (ctx->compat) {
8259 struct compat_iovec __user *ciovs;
8260 struct compat_iovec ciov;
8261
8262 ciovs = (struct compat_iovec __user *) arg;
8263 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8264 return -EFAULT;
8265
d55e5f5b 8266 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
8267 dst->iov_len = ciov.iov_len;
8268 return 0;
8269 }
8270#endif
8271 src = (struct iovec __user *) arg;
8272 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8273 return -EFAULT;
8274 return 0;
8275}
8276
de293938
JA
8277/*
8278 * Not super efficient, but this is only done at registration time. And we do cache
8279 * the last compound head, so generally we'll only do a full search if we don't
8280 * match that one.
8281 *
8282 * We check if the given compound head page has already been accounted, to
8283 * avoid double accounting it. This allows us to account the full size of the
8284 * page, not just the constituent pages of a huge page.
8285 */
8286static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8287 int nr_pages, struct page *hpage)
8288{
8289 int i, j;
8290
8291 /* check current page array */
8292 for (i = 0; i < nr_pages; i++) {
8293 if (!PageCompound(pages[i]))
8294 continue;
8295 if (compound_head(pages[i]) == hpage)
8296 return true;
8297 }
8298
8299 /* check previously registered pages */
8300 for (i = 0; i < ctx->nr_user_bufs; i++) {
41edf1a5 8301 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
de293938
JA
8302
8303 for (j = 0; j < imu->nr_bvecs; j++) {
8304 if (!PageCompound(imu->bvec[j].bv_page))
8305 continue;
8306 if (compound_head(imu->bvec[j].bv_page) == hpage)
8307 return true;
8308 }
8309 }
8310
8311 return false;
8312}
8313
8314static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8315 int nr_pages, struct io_mapped_ubuf *imu,
8316 struct page **last_hpage)
8317{
8318 int i, ret;
8319
216e5835 8320 imu->acct_pages = 0;
de293938
JA
8321 for (i = 0; i < nr_pages; i++) {
8322 if (!PageCompound(pages[i])) {
8323 imu->acct_pages++;
8324 } else {
8325 struct page *hpage;
8326
8327 hpage = compound_head(pages[i]);
8328 if (hpage == *last_hpage)
8329 continue;
8330 *last_hpage = hpage;
8331 if (headpage_already_acct(ctx, pages, i, hpage))
8332 continue;
8333 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8334 }
8335 }
8336
8337 if (!imu->acct_pages)
8338 return 0;
8339
26bfa89e 8340 ret = io_account_mem(ctx, imu->acct_pages);
de293938
JA
8341 if (ret)
8342 imu->acct_pages = 0;
8343 return ret;
8344}
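/*
 * Accounting example for the logic above (sizes assume 4K base pages
 * and 2M huge pages): a 1M buffer living inside one huge page pins 256
 * constituent pages, but acct_pages becomes 512 because the whole
 * compound page is charged once via page_size(hpage). A second buffer
 * inside the same huge page adds nothing, since headpage_already_acct()
 * finds the already-charged head.
 */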
8345
0a96bbe4 8346static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
41edf1a5 8347 struct io_mapped_ubuf **pimu,
0a96bbe4 8348 struct page **last_hpage)
edafccee 8349{
41edf1a5 8350 struct io_mapped_ubuf *imu = NULL;
edafccee
JA
8351 struct vm_area_struct **vmas = NULL;
8352 struct page **pages = NULL;
0a96bbe4
BM
8353 unsigned long off, start, end, ubuf;
8354 size_t size;
8355 int ret, pret, nr_pages, i;
8356
6224843d
PB
8357 if (!iov->iov_base) {
8358 *pimu = ctx->dummy_ubuf;
8359 return 0;
8360 }
8361
0a96bbe4
BM
8362 ubuf = (unsigned long) iov->iov_base;
8363 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8364 start = ubuf >> PAGE_SHIFT;
8365 nr_pages = end - start;
8366
41edf1a5 8367 *pimu = NULL;
0a96bbe4
BM
8368 ret = -ENOMEM;
8369
8370 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8371 if (!pages)
8372 goto done;
8373
8374 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8375 GFP_KERNEL);
8376 if (!vmas)
8377 goto done;
edafccee 8378
41edf1a5 8379 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
a2b4198c 8380 if (!imu)
0a96bbe4
BM
8381 goto done;
8382
8383 ret = 0;
8384 mmap_read_lock(current->mm);
8385 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8386 pages, vmas);
8387 if (pret == nr_pages) {
8388 /* don't support file backed memory */
8389 for (i = 0; i < nr_pages; i++) {
8390 struct vm_area_struct *vma = vmas[i];
8391
40dad765
PB
8392 if (vma_is_shmem(vma))
8393 continue;
0a96bbe4
BM
8394 if (vma->vm_file &&
8395 !is_file_hugepages(vma->vm_file)) {
8396 ret = -EOPNOTSUPP;
8397 break;
8398 }
8399 }
8400 } else {
8401 ret = pret < 0 ? pret : -EFAULT;
8402 }
8403 mmap_read_unlock(current->mm);
8404 if (ret) {
8405 /*
 8406		 * if we did a partial map, or found file backed vmas,
8407 * release any pages we did get
8408 */
8409 if (pret > 0)
8410 unpin_user_pages(pages, pret);
0a96bbe4
BM
8411 goto done;
8412 }
8413
8414 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
8415 if (ret) {
8416 unpin_user_pages(pages, pret);
0a96bbe4
BM
8417 goto done;
8418 }
8419
8420 off = ubuf & ~PAGE_MASK;
8421 size = iov->iov_len;
8422 for (i = 0; i < nr_pages; i++) {
8423 size_t vec_len;
8424
8425 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8426 imu->bvec[i].bv_page = pages[i];
8427 imu->bvec[i].bv_len = vec_len;
8428 imu->bvec[i].bv_offset = off;
8429 off = 0;
8430 size -= vec_len;
8431 }
8432 /* store original address for later verification */
8433 imu->ubuf = ubuf;
4751f53d 8434 imu->ubuf_end = ubuf + iov->iov_len;
0a96bbe4 8435 imu->nr_bvecs = nr_pages;
41edf1a5 8436 *pimu = imu;
0a96bbe4
BM
8437 ret = 0;
8438done:
41edf1a5
PB
8439 if (ret)
8440 kvfree(imu);
0a96bbe4
BM
8441 kvfree(pages);
8442 kvfree(vmas);
8443 return ret;
8444}
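/*
 * Bvec example for the mapping loop above (4K pages assumed): a 6000
 * byte buffer starting 300 bytes into a page spans nr_pages == 2, and
 * is described as two bvecs of 4096 - 300 = 3796 and 2204 bytes, with
 * bv_offset 300 for the first and 0 for the second.
 */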
8445
2b358604 8446static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
0a96bbe4 8447{
87094465
PB
8448 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
8449 return ctx->user_bufs ? 0 : -ENOMEM;
2b358604 8450}
edafccee 8451
2b358604
BM
8452static int io_buffer_validate(struct iovec *iov)
8453{
50e96989
PB
8454 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
8455
2b358604
BM
8456 /*
 8457	 * Don't impose further limits on the size and buffer
 8458	 * constraints here; we'll return -EINVAL later, when the IO is
 8459	 * submitted, if they are wrong.
8460 */
6224843d
PB
8461 if (!iov->iov_base)
8462 return iov->iov_len ? -EFAULT : 0;
8463 if (!iov->iov_len)
2b358604 8464 return -EFAULT;
edafccee 8465
2b358604
BM
8466 /* arbitrary limit, but we need something */
8467 if (iov->iov_len > SZ_1G)
8468 return -EFAULT;
edafccee 8469
50e96989
PB
8470 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
8471 return -EOVERFLOW;
8472
2b358604
BM
8473 return 0;
8474}
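/*
 * Userspace counterpart (a sketch using liburing, not taken from this
 * file): each iovec handed to buffer registration must pass the checks
 * above: a NULL base is only allowed together with a zero length
 * (a sparse slot), the length must not exceed 1G, and base + length
 * must not wrap.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	int ret = io_uring_register_buffers(&ring, &iov, 1);
 *	// ret < 0 on failure, e.g. -EFAULT for an invalid iovec
 */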
edafccee 8475
2b358604 8476static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
634d00df 8477 unsigned int nr_args, u64 __user *tags)
2b358604 8478{
bd54b6fe
BM
8479 struct page *last_hpage = NULL;
8480 struct io_rsrc_data *data;
2b358604
BM
8481 int i, ret;
8482 struct iovec iov;
edafccee 8483
87094465
PB
8484 if (ctx->user_bufs)
8485 return -EBUSY;
489809e2 8486 if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
87094465 8487 return -EINVAL;
bd54b6fe 8488 ret = io_rsrc_node_switch_start(ctx);
2b358604
BM
8489 if (ret)
8490 return ret;
d878c816
PB
8491 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
8492 if (ret)
8493 return ret;
bd54b6fe
BM
8494 ret = io_buffers_map_alloc(ctx, nr_args);
8495 if (ret) {
bb6659cc 8496 io_rsrc_data_free(data);
bd54b6fe
BM
8497 return ret;
8498 }
edafccee 8499
87094465 8500 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
edafccee
JA
8501 ret = io_copy_iov(ctx, &iov, arg, i);
8502 if (ret)
0a96bbe4 8503 break;
2b358604
BM
8504 ret = io_buffer_validate(&iov);
8505 if (ret)
0a96bbe4 8506 break;
2d091d62 8507 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
cf3770e7
CIK
8508 ret = -EINVAL;
8509 break;
8510 }
edafccee 8511
41edf1a5
PB
8512 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
8513 &last_hpage);
0a96bbe4
BM
8514 if (ret)
8515 break;
edafccee 8516 }
0a96bbe4 8517
bd54b6fe 8518 WARN_ON_ONCE(ctx->buf_data);
0a96bbe4 8519
bd54b6fe
BM
8520 ctx->buf_data = data;
8521 if (ret)
8522 __io_sqe_buffers_unregister(ctx);
8523 else
8524 io_rsrc_node_switch(ctx, NULL);
edafccee
JA
8525 return ret;
8526}
8527
634d00df
PB
8528static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
8529 struct io_uring_rsrc_update2 *up,
8530 unsigned int nr_args)
8531{
8532 u64 __user *tags = u64_to_user_ptr(up->tags);
8533 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
634d00df
PB
8534 struct page *last_hpage = NULL;
8535 bool needs_switch = false;
8536 __u32 done;
8537 int i, err;
8538
8539 if (!ctx->buf_data)
8540 return -ENXIO;
8541 if (up->offset + nr_args > ctx->nr_user_bufs)
8542 return -EINVAL;
8543
8544 for (done = 0; done < nr_args; done++) {
0b8c0e7c
PB
8545 struct io_mapped_ubuf *imu;
8546 int offset = up->offset + done;
634d00df
PB
8547 u64 tag = 0;
8548
8549 err = io_copy_iov(ctx, &iov, iovs, done);
8550 if (err)
8551 break;
8552 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
8553 err = -EFAULT;
8554 break;
8555 }
0b8c0e7c
PB
8556 err = io_buffer_validate(&iov);
8557 if (err)
8558 break;
cf3770e7
CIK
8559 if (!iov.iov_base && tag) {
8560 err = -EINVAL;
8561 break;
8562 }
0b8c0e7c
PB
8563 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
8564 if (err)
8565 break;
634d00df 8566
0b8c0e7c 8567 i = array_index_nospec(offset, ctx->nr_user_bufs);
6224843d 8568 if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
0b8c0e7c
PB
8569 err = io_queue_rsrc_removal(ctx->buf_data, offset,
8570 ctx->rsrc_node, ctx->user_bufs[i]);
8571 if (unlikely(err)) {
8572 io_buffer_unmap(ctx, &imu);
634d00df 8573 break;
0b8c0e7c 8574 }
634d00df
PB
8575 ctx->user_bufs[i] = NULL;
8576 needs_switch = true;
8577 }
8578
0b8c0e7c 8579 ctx->user_bufs[i] = imu;
2d091d62 8580 *io_get_tag_slot(ctx->buf_data, offset) = tag;
634d00df
PB
8581 }
8582
8583 if (needs_switch)
8584 io_rsrc_node_switch(ctx, ctx->buf_data);
8585 return done ? done : err;
8586}
8587
9b402849
JA
8588static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8589{
8590 __s32 __user *fds = arg;
8591 int fd;
8592
8593 if (ctx->cq_ev_fd)
8594 return -EBUSY;
8595
8596 if (copy_from_user(&fd, fds, sizeof(*fds)))
8597 return -EFAULT;
8598
8599 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8600 if (IS_ERR(ctx->cq_ev_fd)) {
8601 int ret = PTR_ERR(ctx->cq_ev_fd);
fe7e3257 8602
9b402849
JA
8603 ctx->cq_ev_fd = NULL;
8604 return ret;
8605 }
8606
8607 return 0;
8608}
8609
8610static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8611{
8612 if (ctx->cq_ev_fd) {
8613 eventfd_ctx_put(ctx->cq_ev_fd);
8614 ctx->cq_ev_fd = NULL;
8615 return 0;
8616 }
8617
8618 return -ENXIO;
8619}
8620
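/*
 * Illustrative userspace sketch (hypothetical helper, not part of this file):
 * attach an eventfd so the application is notified when completions are
 * posted, matching io_eventfd_register() above.  nr_args must be 1, and
 * IORING_REGISTER_EVENTFD_ASYNC can be used instead to only signal for
 * completions that did not finish inline.
 */
#include <linux/io_uring.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>

static int attach_cq_eventfd(int ring_fd)
{
        int efd = eventfd(0, EFD_CLOEXEC);

        if (efd < 0)
                return -1;
        if (syscall(__NR_io_uring_register, ring_fd,
                    IORING_REGISTER_EVENTFD, &efd, 1) < 0) {
                close(efd);
                return -1;
        }
        return efd;     /* a read() on efd blocks until a CQE is posted */
}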
5a2e745d
JA
8621static void io_destroy_buffers(struct io_ring_ctx *ctx)
8622{
9e15c3a0
JA
8623 struct io_buffer *buf;
8624 unsigned long index;
8625
8626 xa_for_each(&ctx->io_buffers, index, buf)
8627 __io_remove_buffers(ctx, buf, index, -1U);
5a2e745d
JA
8628}
8629
68e68ee6 8630static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
1b4c351f 8631{
68e68ee6 8632 struct io_kiocb *req, *nxt;
1b4c351f 8633
68e68ee6
JA
8634 list_for_each_entry_safe(req, nxt, list, compl.list) {
8635 if (tsk && req->task != tsk)
8636 continue;
1b4c351f
JA
8637 list_del(&req->compl.list);
8638 kmem_cache_free(req_cachep, req);
8639 }
8640}
8641
4010fec4 8642static void io_req_caches_free(struct io_ring_ctx *ctx)
2b188cc1 8643{
bf019da7 8644 struct io_submit_state *submit_state = &ctx->submit_state;
e5547d2c 8645 struct io_comp_state *cs = &ctx->submit_state.comp;
bf019da7 8646
9a4fdbd8
JA
8647 mutex_lock(&ctx->uring_lock);
8648
8e5c66c4 8649 if (submit_state->free_reqs) {
9a4fdbd8
JA
8650 kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
8651 submit_state->reqs);
8e5c66c4
PB
8652 submit_state->free_reqs = 0;
8653 }
9a4fdbd8 8654
dac7a098 8655 io_flush_cached_locked_reqs(ctx, cs);
e5547d2c 8656 io_req_cache_free(&cs->free_list, NULL);
9a4fdbd8
JA
8657 mutex_unlock(&ctx->uring_lock);
8658}
8659
43597aac 8660static void io_wait_rsrc_data(struct io_rsrc_data *data)
2b188cc1 8661{
43597aac 8662 if (data && !atomic_dec_and_test(&data->refs))
bd54b6fe 8663 wait_for_completion(&data->done);
bd54b6fe 8664}
04fc6c80 8665
2b188cc1
JA
8666static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8667{
37d1e2e3 8668 io_sq_thread_finish(ctx);
2aede0e4 8669
37d1e2e3 8670 if (ctx->mm_account) {
2aede0e4
JA
8671 mmdrop(ctx->mm_account);
8672 ctx->mm_account = NULL;
30975825 8673 }
def596e9 8674
43597aac
PB
8675 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
8676 io_wait_rsrc_data(ctx->buf_data);
8677 io_wait_rsrc_data(ctx->file_data);
8678
8bad28d8 8679 mutex_lock(&ctx->uring_lock);
43597aac 8680 if (ctx->buf_data)
bd54b6fe 8681 __io_sqe_buffers_unregister(ctx);
43597aac 8682 if (ctx->file_data)
08480400 8683 __io_sqe_files_unregister(ctx);
c4ea060e
PB
8684 if (ctx->rings)
8685 __io_cqring_overflow_flush(ctx, true);
8bad28d8 8686 mutex_unlock(&ctx->uring_lock);
9b402849 8687 io_eventfd_unregister(ctx);
5a2e745d 8688 io_destroy_buffers(ctx);
07db298a
PB
8689 if (ctx->sq_creds)
8690 put_cred(ctx->sq_creds);
def596e9 8691
a7f0ed5a
PB
8692 /* there are no registered resources left, nobody uses it */
8693 if (ctx->rsrc_node)
8694 io_rsrc_node_destroy(ctx->rsrc_node);
8dd03afe 8695 if (ctx->rsrc_backup_node)
b895c9a6 8696 io_rsrc_node_destroy(ctx->rsrc_backup_node);
a7f0ed5a
PB
8697 flush_delayed_work(&ctx->rsrc_put_work);
8698
8699 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
8700 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
def596e9 8701
2b188cc1 8702#if defined(CONFIG_UNIX)
355e8d26
EB
8703 if (ctx->ring_sock) {
8704 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 8705 sock_release(ctx->ring_sock);
355e8d26 8706 }
2b188cc1
JA
8707#endif
8708
75b28aff 8709 io_mem_free(ctx->rings);
2b188cc1 8710 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
8711
8712 percpu_ref_exit(&ctx->refs);
2b188cc1 8713 free_uid(ctx->user);
4010fec4 8714 io_req_caches_free(ctx);
e941894e
JA
8715 if (ctx->hash_map)
8716 io_wq_put_hash(ctx->hash_map);
78076bb6 8717 kfree(ctx->cancel_hash);
6224843d 8718 kfree(ctx->dummy_ubuf);
2b188cc1
JA
8719 kfree(ctx);
8720}
8721
8722static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8723{
8724 struct io_ring_ctx *ctx = file->private_data;
8725 __poll_t mask = 0;
8726
311997b3 8727 poll_wait(file, &ctx->poll_wait, wait);
4f7067c3
SB
8728 /*
8729 * synchronizes with barrier from wq_has_sleeper call in
8730 * io_commit_cqring
8731 */
2b188cc1 8732 smp_rmb();
90554200 8733 if (!io_sqring_full(ctx))
2b188cc1 8734 mask |= EPOLLOUT | EPOLLWRNORM;
ed670c3f
HX
8735
8736 /*
8737 * Don't flush cqring overflow list here, just do a simple check.
8738	 * Otherwise there could possibly be an ABBA deadlock:
8739 * CPU0 CPU1
8740 * ---- ----
8741 * lock(&ctx->uring_lock);
8742 * lock(&ep->mtx);
8743 * lock(&ctx->uring_lock);
8744 * lock(&ep->mtx);
8745 *
8746	 * Users may get EPOLLIN while seeing nothing in the cqring; this
8747	 * pushes them to do the flush.
8748 */
5ed7a37d 8749 if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
2b188cc1
JA
8750 mask |= EPOLLIN | EPOLLRDNORM;
8751
8752 return mask;
8753}
8754
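/*
 * Illustrative userspace sketch (hypothetical helper, not part of this file):
 * wait for CQ readiness by adding the ring fd to an epoll set.
 * io_uring_poll() above raises EPOLLIN when CQEs are pending (or the overflow
 * bit is set, in which case entering the kernel flushes them) and
 * EPOLLOUT|EPOLLWRNORM while the SQ ring is not full.
 */
#include <sys/epoll.h>
#include <unistd.h>

static int wait_for_cqes(int ring_fd)
{
        struct epoll_event ev = { .events = EPOLLIN, .data.fd = ring_fd };
        int epfd = epoll_create1(0);
        int ret = -1;

        if (epfd < 0)
                return -1;
        if (epoll_ctl(epfd, EPOLL_CTL_ADD, ring_fd, &ev) == 0)
                ret = epoll_wait(epfd, &ev, 1, -1);     /* blocks until CQEs */
        close(epfd);
        return ret;
}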
8755static int io_uring_fasync(int fd, struct file *file, int on)
8756{
8757 struct io_ring_ctx *ctx = file->private_data;
8758
8759 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8760}
8761
0bead8cd 8762static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
071698e1 8763{
4379bf8b 8764 const struct cred *creds;
071698e1 8765
61cf9370 8766 creds = xa_erase(&ctx->personalities, id);
4379bf8b
JA
8767 if (creds) {
8768 put_cred(creds);
0bead8cd 8769 return 0;
1e6fa521 8770 }
0bead8cd
YD
8771
8772 return -EINVAL;
8773}
8774
d56d938b
PB
8775struct io_tctx_exit {
8776 struct callback_head task_work;
8777 struct completion completion;
baf186c4 8778 struct io_ring_ctx *ctx;
d56d938b
PB
8779};
8780
8781static void io_tctx_exit_cb(struct callback_head *cb)
8782{
8783 struct io_uring_task *tctx = current->io_uring;
8784 struct io_tctx_exit *work;
8785
8786 work = container_of(cb, struct io_tctx_exit, task_work);
8787 /*
8788 * When @in_idle, we're in cancellation and it's racy to remove the
8789 * node. It'll be removed by the end of cancellation, just ignore it.
8790 */
8791 if (!atomic_read(&tctx->in_idle))
eef51daa 8792 io_uring_del_tctx_node((unsigned long)work->ctx);
d56d938b
PB
8793 complete(&work->completion);
8794}
8795
28090c13
PB
8796static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8797{
8798 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8799
8800 return req->ctx == data;
8801}
8802
85faa7b8
JA
8803static void io_ring_exit_work(struct work_struct *work)
8804{
d56d938b 8805 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
b5bb3a24 8806 unsigned long timeout = jiffies + HZ * 60 * 5;
d56d938b
PB
8807 struct io_tctx_exit exit;
8808 struct io_tctx_node *node;
8809 int ret;
85faa7b8 8810
56952e91
JA
8811 /*
8812 * If we're doing polled IO and end up having requests being
8813 * submitted async (out-of-line), then completions can come in while
8814 * we're waiting for refs to drop. We need to reap these manually,
8815 * as nobody else will be looking for them.
8816 */
b2edc0a7 8817 do {
3dd0c97a 8818 io_uring_try_cancel_requests(ctx, NULL, true);
28090c13
PB
8819 if (ctx->sq_data) {
8820 struct io_sq_data *sqd = ctx->sq_data;
8821 struct task_struct *tsk;
8822
8823 io_sq_thread_park(sqd);
8824 tsk = sqd->thread;
8825 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
8826 io_wq_cancel_cb(tsk->io_uring->io_wq,
8827 io_cancel_ctx_cb, ctx, true);
8828 io_sq_thread_unpark(sqd);
8829 }
b5bb3a24
PB
8830
8831 WARN_ON_ONCE(time_after(jiffies, timeout));
b2edc0a7 8832 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
d56d938b 8833
7f00651a
PB
8834 init_completion(&exit.completion);
8835 init_task_work(&exit.task_work, io_tctx_exit_cb);
8836 exit.ctx = ctx;
89b5066e
PB
8837 /*
8838	 * Some may use the context even when all refs and requests have been put,
8839 * and they are free to do so while still holding uring_lock or
5b0a6acc 8840 * completion_lock, see io_req_task_submit(). Apart from other work,
89b5066e
PB
8841	 * this lock/unlock section also waits for them to finish.
8842 */
d56d938b
PB
8843 mutex_lock(&ctx->uring_lock);
8844 while (!list_empty(&ctx->tctx_list)) {
b5bb3a24
PB
8845 WARN_ON_ONCE(time_after(jiffies, timeout));
8846
d56d938b
PB
8847 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
8848 ctx_node);
7f00651a
PB
8849 /* don't spin on a single task if cancellation failed */
8850 list_rotate_left(&ctx->tctx_list);
d56d938b
PB
8851 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
8852 if (WARN_ON_ONCE(ret))
8853 continue;
8854 wake_up_process(node->task);
8855
8856 mutex_unlock(&ctx->uring_lock);
8857 wait_for_completion(&exit.completion);
d56d938b
PB
8858 mutex_lock(&ctx->uring_lock);
8859 }
8860 mutex_unlock(&ctx->uring_lock);
89b5066e
PB
8861 spin_lock_irq(&ctx->completion_lock);
8862 spin_unlock_irq(&ctx->completion_lock);
d56d938b 8863
85faa7b8
JA
8864 io_ring_ctx_free(ctx);
8865}
8866
80c4cbdb
PB
8867/* Returns true if we found and killed one or more timeouts */
8868static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
3dd0c97a 8869 bool cancel_all)
80c4cbdb
PB
8870{
8871 struct io_kiocb *req, *tmp;
8872 int canceled = 0;
8873
8874 spin_lock_irq(&ctx->completion_lock);
8875 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
3dd0c97a 8876 if (io_match_task(req, tsk, cancel_all)) {
80c4cbdb
PB
8877 io_kill_timeout(req, -ECANCELED);
8878 canceled++;
8879 }
8880 }
51520426
PB
8881 if (canceled != 0)
8882 io_commit_cqring(ctx);
80c4cbdb 8883 spin_unlock_irq(&ctx->completion_lock);
80c4cbdb
PB
8884 if (canceled != 0)
8885 io_cqring_ev_posted(ctx);
8886 return canceled != 0;
8887}
8888
2b188cc1
JA
8889static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8890{
61cf9370
MWO
8891 unsigned long index;
8892 struct creds *creds;
8893
2b188cc1
JA
8894 mutex_lock(&ctx->uring_lock);
8895 percpu_ref_kill(&ctx->refs);
634578f8 8896 if (ctx->rings)
6c2450ae 8897 __io_cqring_overflow_flush(ctx, true);
61cf9370
MWO
8898 xa_for_each(&ctx->personalities, index, creds)
8899 io_unregister_personality(ctx, index);
2b188cc1
JA
8900 mutex_unlock(&ctx->uring_lock);
8901
3dd0c97a
PB
8902 io_kill_timeouts(ctx, NULL, true);
8903 io_poll_remove_all(ctx, NULL, true);
561fb04a 8904
15dff286 8905 /* if we failed setting up the ctx, we might not have any rings */
b2edc0a7 8906 io_iopoll_try_reap_events(ctx);
309fc03a 8907
85faa7b8 8908 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
fc666777
JA
8909 /*
8910 * Use system_unbound_wq to avoid spawning tons of event kworkers
8911 * if we're exiting a ton of rings at the same time. It just adds
8912	 * noise and overhead; there's no discernible change in runtime
8913 * over using system_wq.
8914 */
8915 queue_work(system_unbound_wq, &ctx->exit_work);
2b188cc1
JA
8916}
8917
8918static int io_uring_release(struct inode *inode, struct file *file)
8919{
8920 struct io_ring_ctx *ctx = file->private_data;
8921
8922 file->private_data = NULL;
8923 io_ring_ctx_wait_and_kill(ctx);
8924 return 0;
8925}
8926
f6edbabb
PB
8927struct io_task_cancel {
8928 struct task_struct *task;
3dd0c97a 8929 bool all;
f6edbabb 8930};
f254ac04 8931
f6edbabb 8932static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
b711d4ea 8933{
9a472ef7 8934 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
f6edbabb 8935 struct io_task_cancel *cancel = data;
9a472ef7
PB
8936 bool ret;
8937
3dd0c97a 8938 if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
9a472ef7
PB
8939 unsigned long flags;
8940 struct io_ring_ctx *ctx = req->ctx;
8941
8942 /* protect against races with linked timeouts */
8943 spin_lock_irqsave(&ctx->completion_lock, flags);
3dd0c97a 8944 ret = io_match_task(req, cancel->task, cancel->all);
9a472ef7
PB
8945 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8946 } else {
3dd0c97a 8947 ret = io_match_task(req, cancel->task, cancel->all);
9a472ef7
PB
8948 }
8949 return ret;
b711d4ea
JA
8950}
8951
e1915f76 8952static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
3dd0c97a 8953 struct task_struct *task, bool cancel_all)
b7ddce3c 8954{
e1915f76 8955 struct io_defer_entry *de;
b7ddce3c
PB
8956 LIST_HEAD(list);
8957
8958 spin_lock_irq(&ctx->completion_lock);
8959 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
3dd0c97a 8960 if (io_match_task(de->req, task, cancel_all)) {
b7ddce3c
PB
8961 list_cut_position(&list, &ctx->defer_list, &de->list);
8962 break;
8963 }
8964 }
8965 spin_unlock_irq(&ctx->completion_lock);
e1915f76
PB
8966 if (list_empty(&list))
8967 return false;
b7ddce3c
PB
8968
8969 while (!list_empty(&list)) {
8970 de = list_first_entry(&list, struct io_defer_entry, list);
8971 list_del_init(&de->list);
f41db273 8972 io_req_complete_failed(de->req, -ECANCELED);
b7ddce3c
PB
8973 kfree(de);
8974 }
e1915f76 8975 return true;
b7ddce3c
PB
8976}
8977
1b00764f
PB
8978static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
8979{
8980 struct io_tctx_node *node;
8981 enum io_wq_cancel cret;
8982 bool ret = false;
8983
8984 mutex_lock(&ctx->uring_lock);
8985 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
8986 struct io_uring_task *tctx = node->task->io_uring;
8987
8988 /*
8989 * io_wq will stay alive while we hold uring_lock, because it's
8990	 * killed after ctx nodes, which requires taking the lock.
8991 */
8992 if (!tctx || !tctx->io_wq)
8993 continue;
8994 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
8995 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8996 }
8997 mutex_unlock(&ctx->uring_lock);
8998
8999 return ret;
9000}
9001
9936c7c2
PB
9002static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
9003 struct task_struct *task,
3dd0c97a 9004 bool cancel_all)
9936c7c2 9005{
3dd0c97a 9006 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
1b00764f 9007 struct io_uring_task *tctx = task ? task->io_uring : NULL;
9936c7c2
PB
9008
9009 while (1) {
9010 enum io_wq_cancel cret;
9011 bool ret = false;
9012
1b00764f
PB
9013 if (!task) {
9014 ret |= io_uring_try_cancel_iowq(ctx);
9015 } else if (tctx && tctx->io_wq) {
9016 /*
9017 * Cancels requests of all rings, not only @ctx, but
9018 * it's fine as the task is in exit/exec.
9019 */
5aa75ed5 9020 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
9936c7c2
PB
9021 &cancel, true);
9022 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
9023 }
9024
9025 /* SQPOLL thread does its own polling */
3dd0c97a 9026 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
d052d1d6 9027 (ctx->sq_data && ctx->sq_data->thread == current)) {
9936c7c2
PB
9028 while (!list_empty_careful(&ctx->iopoll_list)) {
9029 io_iopoll_try_reap_events(ctx);
9030 ret = true;
9031 }
9032 }
9033
3dd0c97a
PB
9034 ret |= io_cancel_defer_files(ctx, task, cancel_all);
9035 ret |= io_poll_remove_all(ctx, task, cancel_all);
9036 ret |= io_kill_timeouts(ctx, task, cancel_all);
e5dc480d
PB
9037 if (task)
9038 ret |= io_run_task_work();
9936c7c2
PB
9039 if (!ret)
9040 break;
9041 cond_resched();
9042 }
9043}
9044
eef51daa 9045static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
0f212204 9046{
236434c3 9047 struct io_uring_task *tctx = current->io_uring;
13bf43f5 9048 struct io_tctx_node *node;
a528b04e 9049 int ret;
236434c3
MWO
9050
9051 if (unlikely(!tctx)) {
5aa75ed5 9052 ret = io_uring_alloc_task_context(current, ctx);
0f212204
JA
9053 if (unlikely(ret))
9054 return ret;
236434c3 9055 tctx = current->io_uring;
0f212204 9056 }
cf27f3b1
PB
9057 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
9058 node = kmalloc(sizeof(*node), GFP_KERNEL);
9059 if (!node)
9060 return -ENOMEM;
9061 node->ctx = ctx;
9062 node->task = current;
13bf43f5 9063
cf27f3b1
PB
9064 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
9065 node, GFP_KERNEL));
9066 if (ret) {
9067 kfree(node);
9068 return ret;
0f212204 9069 }
cf27f3b1
PB
9070
9071 mutex_lock(&ctx->uring_lock);
9072 list_add(&node->ctx_node, &ctx->tctx_list);
9073 mutex_unlock(&ctx->uring_lock);
0f212204 9074 }
cf27f3b1 9075 tctx->last = ctx;
0f212204
JA
9076 return 0;
9077}
9078
cf27f3b1
PB
9079/*
9080 * Note that this task has used io_uring. We use it for cancelation purposes.
9081 */
eef51daa 9082static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
cf27f3b1
PB
9083{
9084 struct io_uring_task *tctx = current->io_uring;
9085
9086 if (likely(tctx && tctx->last == ctx))
9087 return 0;
eef51daa 9088 return __io_uring_add_tctx_node(ctx);
cf27f3b1
PB
9089}
9090
0f212204
JA
9091/*
9092 * Remove this io_uring_file -> task mapping.
9093 */
eef51daa 9094static void io_uring_del_tctx_node(unsigned long index)
0f212204
JA
9095{
9096 struct io_uring_task *tctx = current->io_uring;
13bf43f5 9097 struct io_tctx_node *node;
2941267b 9098
eebd2e37
PB
9099 if (!tctx)
9100 return;
13bf43f5
PB
9101 node = xa_erase(&tctx->xa, index);
9102 if (!node)
2941267b 9103 return;
0f212204 9104
13bf43f5
PB
9105 WARN_ON_ONCE(current != node->task);
9106 WARN_ON_ONCE(list_empty(&node->ctx_node));
9107
9108 mutex_lock(&node->ctx->uring_lock);
9109 list_del(&node->ctx_node);
9110 mutex_unlock(&node->ctx->uring_lock);
9111
baf186c4 9112 if (tctx->last == node->ctx)
0f212204 9113 tctx->last = NULL;
13bf43f5 9114 kfree(node);
0f212204
JA
9115}
9116
8452d4a6 9117static void io_uring_clean_tctx(struct io_uring_task *tctx)
de7f1d9e 9118{
ba5ef6dc 9119 struct io_wq *wq = tctx->io_wq;
13bf43f5 9120 struct io_tctx_node *node;
de7f1d9e
PB
9121 unsigned long index;
9122
13bf43f5 9123 xa_for_each(&tctx->xa, index, node)
eef51daa 9124 io_uring_del_tctx_node(index);
b16ef427
ME
9125 if (wq) {
9126 /*
9127	 * Must be after io_uring_del_tctx_node() (removes nodes under
9128 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
9129 */
9130 tctx->io_wq = NULL;
ba5ef6dc 9131 io_wq_put_and_exit(wq);
b16ef427 9132 }
de7f1d9e
PB
9133}
9134
3f48cf18 9135static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
521d6a73 9136{
3f48cf18
PB
9137 if (tracked)
9138 return atomic_read(&tctx->inflight_tracked);
521d6a73
PB
9139 return percpu_counter_sum(&tctx->inflight);
9140}
9141
09899b19
PB
9142static void io_uring_drop_tctx_refs(struct task_struct *task)
9143{
9144 struct io_uring_task *tctx = task->io_uring;
9145 unsigned int refs = tctx->cached_refs;
9146
9147 tctx->cached_refs = 0;
9148 percpu_counter_sub(&tctx->inflight, refs);
9149 put_task_struct_many(task, refs);
9150}
9151
78cc687b
PB
9152/*
9153 * Find any io_uring ctx that this task has registered or done IO on, and cancel
9154	 * requests. @sqd should be non-NULL iff it's an SQPOLL thread cancellation.
9155 */
9156static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
0e9ddb39 9157{
521d6a73 9158 struct io_uring_task *tctx = current->io_uring;
734551df 9159 struct io_ring_ctx *ctx;
0e9ddb39
PB
9160 s64 inflight;
9161 DEFINE_WAIT(wait);
fdaf083c 9162
78cc687b
PB
9163 WARN_ON_ONCE(sqd && sqd->thread != current);
9164
6d042ffb
PO
9165 if (!current->io_uring)
9166 return;
17a91051
PB
9167 if (tctx->io_wq)
9168 io_wq_exit_start(tctx->io_wq);
9169
09899b19 9170 io_uring_drop_tctx_refs(current);
0e9ddb39
PB
9171 atomic_inc(&tctx->in_idle);
9172 do {
9173 /* read completions before cancelations */
78cc687b 9174 inflight = tctx_inflight(tctx, !cancel_all);
0e9ddb39
PB
9175 if (!inflight)
9176 break;
fdaf083c 9177
78cc687b
PB
9178 if (!sqd) {
9179 struct io_tctx_node *node;
9180 unsigned long index;
0f212204 9181
78cc687b
PB
9182 xa_for_each(&tctx->xa, index, node) {
9183 /* sqpoll task will cancel all its requests */
9184 if (node->ctx->sq_data)
9185 continue;
9186 io_uring_try_cancel_requests(node->ctx, current,
9187 cancel_all);
9188 }
9189 } else {
9190 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
9191 io_uring_try_cancel_requests(ctx, current,
9192 cancel_all);
9193 }
17a91051 9194
0f212204 9195 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
0f212204 9196 /*
a1bb3cd5
PB
9197 * If we've seen completions, retry without waiting. This
9198 * avoids a race where a completion comes in before we did
9199 * prepare_to_wait().
0f212204 9200 */
3dd0c97a 9201 if (inflight == tctx_inflight(tctx, !cancel_all))
a1bb3cd5 9202 schedule();
f57555ed 9203 finish_wait(&tctx->wait, &wait);
d8a6df10 9204 } while (1);
fdaf083c 9205 atomic_dec(&tctx->in_idle);
de7f1d9e 9206
8452d4a6 9207 io_uring_clean_tctx(tctx);
3dd0c97a 9208 if (cancel_all) {
3f48cf18
PB
9209 /* for exec all current's requests should be gone, kill tctx */
9210 __io_uring_free(current);
9211 }
44e728b8
PB
9212}
9213
78cc687b
PB
9214void __io_uring_cancel(struct files_struct *files)
9215{
9216 io_uring_cancel_generic(!files, NULL);
9217}
9218
6c5c240e
RP
9219static void *io_uring_validate_mmap_request(struct file *file,
9220 loff_t pgoff, size_t sz)
2b188cc1 9221{
2b188cc1 9222 struct io_ring_ctx *ctx = file->private_data;
6c5c240e 9223 loff_t offset = pgoff << PAGE_SHIFT;
2b188cc1
JA
9224 struct page *page;
9225 void *ptr;
9226
9227 switch (offset) {
9228 case IORING_OFF_SQ_RING:
75b28aff
HV
9229 case IORING_OFF_CQ_RING:
9230 ptr = ctx->rings;
2b188cc1
JA
9231 break;
9232 case IORING_OFF_SQES:
9233 ptr = ctx->sq_sqes;
9234 break;
2b188cc1 9235 default:
6c5c240e 9236 return ERR_PTR(-EINVAL);
2b188cc1
JA
9237 }
9238
9239 page = virt_to_head_page(ptr);
a50b854e 9240 if (sz > page_size(page))
6c5c240e
RP
9241 return ERR_PTR(-EINVAL);
9242
9243 return ptr;
9244}
9245
9246#ifdef CONFIG_MMU
9247
9248static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9249{
9250 size_t sz = vma->vm_end - vma->vm_start;
9251 unsigned long pfn;
9252 void *ptr;
9253
9254 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9255 if (IS_ERR(ptr))
9256 return PTR_ERR(ptr);
2b188cc1
JA
9257
9258 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9259 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9260}
9261
6c5c240e
RP
9262#else /* !CONFIG_MMU */
9263
9264static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9265{
9266 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9267}
9268
9269static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9270{
9271 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9272}
9273
9274static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9275 unsigned long addr, unsigned long len,
9276 unsigned long pgoff, unsigned long flags)
9277{
9278 void *ptr;
9279
9280 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9281 if (IS_ERR(ptr))
9282 return PTR_ERR(ptr);
9283
9284 return (unsigned long) ptr;
9285}
9286
9287#endif /* !CONFIG_MMU */
9288
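/*
 * Illustrative userspace sketch (hypothetical helper, not part of this file):
 * map the rings through the offsets validated by
 * io_uring_validate_mmap_request() above.  With IORING_FEAT_SINGLE_MMAP the
 * SQ and CQ rings share one mapping at IORING_OFF_SQ_RING; the SQE array is
 * always a separate mapping at IORING_OFF_SQES.
 */
#include <linux/io_uring.h>
#include <sys/mman.h>

static int map_rings(int ring_fd, struct io_uring_params *p,
                     void **sq_cq_ring, struct io_uring_sqe **sqes)
{
        size_t ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
        size_t cq_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);

        if (!(p->features & IORING_FEAT_SINGLE_MMAP))
                return -1;      /* pre-5.4 kernels need two separate mappings */
        if (cq_sz > ring_sz)
                ring_sz = cq_sz;
        *sq_cq_ring = mmap(NULL, ring_sz, PROT_READ | PROT_WRITE,
                           MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
        *sqes = mmap(NULL, p->sq_entries * sizeof(struct io_uring_sqe),
                     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                     ring_fd, IORING_OFF_SQES);
        return (*sq_cq_ring == MAP_FAILED || *sqes == MAP_FAILED) ? -1 : 0;
}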
d9d05217 9289static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
90554200
JA
9290{
9291 DEFINE_WAIT(wait);
9292
9293 do {
9294 if (!io_sqring_full(ctx))
9295 break;
90554200
JA
9296 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9297
9298 if (!io_sqring_full(ctx))
9299 break;
90554200
JA
9300 schedule();
9301 } while (!signal_pending(current));
9302
9303 finish_wait(&ctx->sqo_sq_wait, &wait);
5199328a 9304 return 0;
90554200
JA
9305}
9306
c73ebb68
HX
9307static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9308 struct __kernel_timespec __user **ts,
9309 const sigset_t __user **sig)
9310{
9311 struct io_uring_getevents_arg arg;
9312
9313 /*
9314 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9315 * is just a pointer to the sigset_t.
9316 */
9317 if (!(flags & IORING_ENTER_EXT_ARG)) {
9318 *sig = (const sigset_t __user *) argp;
9319 *ts = NULL;
9320 return 0;
9321 }
9322
9323 /*
9324 * EXT_ARG is set - ensure we agree on the size of it and copy in our
9325 * timespec and sigset_t pointers if good.
9326 */
9327 if (*argsz != sizeof(arg))
9328 return -EINVAL;
9329 if (copy_from_user(&arg, argp, sizeof(arg)))
9330 return -EFAULT;
9331 *sig = u64_to_user_ptr(arg.sigmask);
9332 *argsz = arg.sigmask_sz;
9333 *ts = u64_to_user_ptr(arg.ts);
9334 return 0;
9335}
9336
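/*
 * Illustrative userspace sketch (hypothetical helper, not part of this file):
 * wait for one completion with a timeout via IORING_ENTER_EXT_ARG, using the
 * layout parsed by io_get_ext_arg() above.  Without EXT_ARG the last syscall
 * argument would simply be a sigset_t pointer.
 */
#include <linux/io_uring.h>
#include <linux/time_types.h>
#include <sys/syscall.h>
#include <unistd.h>

static int wait_cqe_timeout(int ring_fd, long timeout_ms)
{
        struct __kernel_timespec ts = {
                .tv_sec = timeout_ms / 1000,
                .tv_nsec = (timeout_ms % 1000) * 1000000L,
        };
        struct io_uring_getevents_arg arg = {
                .sigmask = 0,           /* no signal mask change */
                .sigmask_sz = 0,
                .ts = (__u64)(unsigned long)&ts,
        };

        /* argsz carries sizeof(arg) so the kernel can validate the layout */
        return syscall(__NR_io_uring_enter, ring_fd, 0, 1,
                       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
                       &arg, sizeof(arg));
}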
2b188cc1 9337SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
c73ebb68
HX
9338 u32, min_complete, u32, flags, const void __user *, argp,
9339 size_t, argsz)
2b188cc1
JA
9340{
9341 struct io_ring_ctx *ctx;
2b188cc1
JA
9342 int submitted = 0;
9343 struct fd f;
33f993da 9344 long ret;
2b188cc1 9345
4c6e277c 9346 io_run_task_work();
b41e9852 9347
33f993da
PB
9348 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
9349 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
2b188cc1
JA
9350 return -EINVAL;
9351
9352 f = fdget(fd);
33f993da 9353 if (unlikely(!f.file))
2b188cc1
JA
9354 return -EBADF;
9355
9356 ret = -EOPNOTSUPP;
33f993da 9357 if (unlikely(f.file->f_op != &io_uring_fops))
2b188cc1
JA
9358 goto out_fput;
9359
9360 ret = -ENXIO;
9361 ctx = f.file->private_data;
33f993da 9362 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
2b188cc1
JA
9363 goto out_fput;
9364
7e84e1c7 9365 ret = -EBADFD;
33f993da 9366 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
7e84e1c7
SG
9367 goto out;
9368
6c271ce2
JA
9369 /*
9370 * For SQ polling, the thread will do all submissions and completions.
9371 * Just return the requested submit count, and wake the thread if
9372 * we were asked to.
9373 */
b2a9eada 9374 ret = 0;
6c271ce2 9375 if (ctx->flags & IORING_SETUP_SQPOLL) {
6c2450ae 9376 io_cqring_overflow_flush(ctx, false);
89448c47 9377
21f96522
JA
9378 if (unlikely(ctx->sq_data->thread == NULL)) {
9379 ret = -EOWNERDEAD;
04147488 9380 goto out;
21f96522 9381 }
6c271ce2 9382 if (flags & IORING_ENTER_SQ_WAKEUP)
534ca6d6 9383 wake_up(&ctx->sq_data->wait);
d9d05217
PB
9384 if (flags & IORING_ENTER_SQ_WAIT) {
9385 ret = io_sqpoll_wait_sq(ctx);
9386 if (ret)
9387 goto out;
9388 }
6c271ce2 9389 submitted = to_submit;
b2a9eada 9390 } else if (to_submit) {
eef51daa 9391 ret = io_uring_add_tctx_node(ctx);
0f212204
JA
9392 if (unlikely(ret))
9393 goto out;
2b188cc1 9394 mutex_lock(&ctx->uring_lock);
0f212204 9395 submitted = io_submit_sqes(ctx, to_submit);
2b188cc1 9396 mutex_unlock(&ctx->uring_lock);
7c504e65
PB
9397
9398 if (submitted != to_submit)
9399 goto out;
2b188cc1
JA
9400 }
9401 if (flags & IORING_ENTER_GETEVENTS) {
c73ebb68
HX
9402 const sigset_t __user *sig;
9403 struct __kernel_timespec __user *ts;
9404
9405 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9406 if (unlikely(ret))
9407 goto out;
9408
2b188cc1
JA
9409 min_complete = min(min_complete, ctx->cq_entries);
9410
32b2244a
XW
9411 /*
9412 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
9413	 * space applications don't need to poll for IO completion events
9414	 * themselves; they can rely on io_sq_thread to do the polling
9415 * work, which can reduce cpu usage and uring_lock contention.
9416 */
9417 if (ctx->flags & IORING_SETUP_IOPOLL &&
9418 !(ctx->flags & IORING_SETUP_SQPOLL)) {
7668b92a 9419 ret = io_iopoll_check(ctx, min_complete);
def596e9 9420 } else {
c73ebb68 9421 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
def596e9 9422 }
2b188cc1
JA
9423 }
9424
7c504e65 9425out:
6805b32e 9426 percpu_ref_put(&ctx->refs);
2b188cc1
JA
9427out_fput:
9428 fdput(f);
9429 return submitted ? submitted : ret;
9430}
9431
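/*
 * Illustrative userspace sketch (hypothetical helper, not part of this file):
 * submission-side handling for SQPOLL rings.  The sq_flags word lives in the
 * mapped SQ ring at p->sq_off.flags; when the poller thread has gone idle it
 * sets IORING_SQ_NEED_WAKEUP, and a single io_uring_enter() with
 * IORING_ENTER_SQ_WAKEUP restarts it (see the SQPOLL branch above).
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static void kick_sqpoll_if_needed(int ring_fd, const unsigned *sq_flags)
{
        /* read after the barrier that follows the application's tail update */
        unsigned flags = *(const volatile unsigned *)sq_flags;

        if (flags & IORING_SQ_NEED_WAKEUP)
                syscall(__NR_io_uring_enter, ring_fd, 0, 0,
                        IORING_ENTER_SQ_WAKEUP, NULL, 0);
}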
bebdb65e 9432#ifdef CONFIG_PROC_FS
61cf9370
MWO
9433static int io_uring_show_cred(struct seq_file *m, unsigned int id,
9434 const struct cred *cred)
87ce955b 9435{
87ce955b
JA
9436 struct user_namespace *uns = seq_user_ns(m);
9437 struct group_info *gi;
9438 kernel_cap_t cap;
9439 unsigned __capi;
9440 int g;
9441
9442 seq_printf(m, "%5d\n", id);
9443 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9444 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9445 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9446 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9447 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9448 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9449 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9450 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9451 seq_puts(m, "\n\tGroups:\t");
9452 gi = cred->group_info;
9453 for (g = 0; g < gi->ngroups; g++) {
9454 seq_put_decimal_ull(m, g ? " " : "",
9455 from_kgid_munged(uns, gi->gid[g]));
9456 }
9457 seq_puts(m, "\n\tCapEff:\t");
9458 cap = cred->cap_effective;
9459 CAP_FOR_EACH_U32(__capi)
9460 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9461 seq_putc(m, '\n');
9462 return 0;
9463}
9464
9465static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9466{
dbbe9c64 9467 struct io_sq_data *sq = NULL;
fad8e0de 9468 bool has_lock;
87ce955b
JA
9469 int i;
9470
fad8e0de
JA
9471 /*
9472 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9473	 * since the fdinfo case grabs it in the opposite direction of normal use
9474 * cases. If we fail to get the lock, we just don't iterate any
9475 * structures that could be going away outside the io_uring mutex.
9476 */
9477 has_lock = mutex_trylock(&ctx->uring_lock);
9478
5f3f26f9 9479 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
dbbe9c64 9480 sq = ctx->sq_data;
5f3f26f9
JA
9481 if (!sq->thread)
9482 sq = NULL;
9483 }
dbbe9c64
JQ
9484
9485 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9486 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
87ce955b 9487 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
fad8e0de 9488 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
7b29f92d 9489 struct file *f = io_file_from_index(ctx, i);
87ce955b 9490
87ce955b
JA
9491 if (f)
9492 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9493 else
9494 seq_printf(m, "%5u: <none>\n", i);
9495 }
9496 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
fad8e0de 9497 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
41edf1a5 9498 struct io_mapped_ubuf *buf = ctx->user_bufs[i];
4751f53d 9499 unsigned int len = buf->ubuf_end - buf->ubuf;
87ce955b 9500
4751f53d 9501 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
87ce955b 9502 }
61cf9370
MWO
9503 if (has_lock && !xa_empty(&ctx->personalities)) {
9504 unsigned long index;
9505 const struct cred *cred;
9506
87ce955b 9507 seq_printf(m, "Personalities:\n");
61cf9370
MWO
9508 xa_for_each(&ctx->personalities, index, cred)
9509 io_uring_show_cred(m, index, cred);
87ce955b 9510 }
d7718a9d
JA
9511 seq_printf(m, "PollList:\n");
9512 spin_lock_irq(&ctx->completion_lock);
9513 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9514 struct hlist_head *list = &ctx->cancel_hash[i];
9515 struct io_kiocb *req;
9516
9517 hlist_for_each_entry(req, list, hash_node)
9518 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9519 req->task->task_works != NULL);
9520 }
9521 spin_unlock_irq(&ctx->completion_lock);
fad8e0de
JA
9522 if (has_lock)
9523 mutex_unlock(&ctx->uring_lock);
87ce955b
JA
9524}
9525
9526static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9527{
9528 struct io_ring_ctx *ctx = f->private_data;
9529
9530 if (percpu_ref_tryget(&ctx->refs)) {
9531 __io_uring_show_fdinfo(ctx, m);
9532 percpu_ref_put(&ctx->refs);
9533 }
9534}
bebdb65e 9535#endif
87ce955b 9536
2b188cc1
JA
9537static const struct file_operations io_uring_fops = {
9538 .release = io_uring_release,
9539 .mmap = io_uring_mmap,
6c5c240e
RP
9540#ifndef CONFIG_MMU
9541 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9542 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9543#endif
2b188cc1
JA
9544 .poll = io_uring_poll,
9545 .fasync = io_uring_fasync,
bebdb65e 9546#ifdef CONFIG_PROC_FS
87ce955b 9547 .show_fdinfo = io_uring_show_fdinfo,
bebdb65e 9548#endif
2b188cc1
JA
9549};
9550
9551static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9552 struct io_uring_params *p)
9553{
75b28aff
HV
9554 struct io_rings *rings;
9555 size_t size, sq_array_offset;
2b188cc1 9556
bd740481
JA
9557 /* make sure these are sane, as we already accounted them */
9558 ctx->sq_entries = p->sq_entries;
9559 ctx->cq_entries = p->cq_entries;
9560
75b28aff
HV
9561 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9562 if (size == SIZE_MAX)
9563 return -EOVERFLOW;
9564
9565 rings = io_mem_alloc(size);
9566 if (!rings)
2b188cc1
JA
9567 return -ENOMEM;
9568
75b28aff
HV
9569 ctx->rings = rings;
9570 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9571 rings->sq_ring_mask = p->sq_entries - 1;
9572 rings->cq_ring_mask = p->cq_entries - 1;
9573 rings->sq_ring_entries = p->sq_entries;
9574 rings->cq_ring_entries = p->cq_entries;
2b188cc1
JA
9575
9576 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
eb065d30
JA
9577 if (size == SIZE_MAX) {
9578 io_mem_free(ctx->rings);
9579 ctx->rings = NULL;
2b188cc1 9580 return -EOVERFLOW;
eb065d30 9581 }
2b188cc1
JA
9582
9583 ctx->sq_sqes = io_mem_alloc(size);
eb065d30
JA
9584 if (!ctx->sq_sqes) {
9585 io_mem_free(ctx->rings);
9586 ctx->rings = NULL;
2b188cc1 9587 return -ENOMEM;
eb065d30 9588 }
2b188cc1 9589
2b188cc1
JA
9590 return 0;
9591}
9592
9faadcc8
PB
9593static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9594{
9595 int ret, fd;
9596
9597 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9598 if (fd < 0)
9599 return fd;
9600
eef51daa 9601 ret = io_uring_add_tctx_node(ctx);
9faadcc8
PB
9602 if (ret) {
9603 put_unused_fd(fd);
9604 return ret;
9605 }
9606 fd_install(fd, file);
9607 return fd;
9608}
9609
2b188cc1
JA
9610/*
9611 * Allocate an anonymous fd; this is what constitutes the application-
9612 * visible backing of an io_uring instance. The application mmaps this
9613 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9614 * we have to tie this fd to a socket for file garbage collection purposes.
9615 */
9faadcc8 9616static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
2b188cc1
JA
9617{
9618 struct file *file;
9faadcc8 9619#if defined(CONFIG_UNIX)
2b188cc1
JA
9620 int ret;
9621
2b188cc1
JA
9622 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9623 &ctx->ring_sock);
9624 if (ret)
9faadcc8 9625 return ERR_PTR(ret);
2b188cc1
JA
9626#endif
9627
2b188cc1
JA
9628 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9629 O_RDWR | O_CLOEXEC);
2b188cc1 9630#if defined(CONFIG_UNIX)
9faadcc8
PB
9631 if (IS_ERR(file)) {
9632 sock_release(ctx->ring_sock);
9633 ctx->ring_sock = NULL;
9634 } else {
9635 ctx->ring_sock->file = file;
0f212204 9636 }
2b188cc1 9637#endif
9faadcc8 9638 return file;
2b188cc1
JA
9639}
9640
7f13657d
XW
9641static int io_uring_create(unsigned entries, struct io_uring_params *p,
9642 struct io_uring_params __user *params)
2b188cc1 9643{
2b188cc1 9644 struct io_ring_ctx *ctx;
9faadcc8 9645 struct file *file;
2b188cc1
JA
9646 int ret;
9647
8110c1a6 9648 if (!entries)
2b188cc1 9649 return -EINVAL;
8110c1a6
JA
9650 if (entries > IORING_MAX_ENTRIES) {
9651 if (!(p->flags & IORING_SETUP_CLAMP))
9652 return -EINVAL;
9653 entries = IORING_MAX_ENTRIES;
9654 }
2b188cc1
JA
9655
9656 /*
9657 * Use twice as many entries for the CQ ring. It's possible for the
9658 * application to drive a higher depth than the size of the SQ ring,
9659 * since the sqes are only used at submission time. This allows for
33a107f0
JA
9660 * some flexibility in overcommitting a bit. If the application has
9661 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9662 * of CQ ring entries manually.
2b188cc1
JA
9663 */
9664 p->sq_entries = roundup_pow_of_two(entries);
33a107f0
JA
9665 if (p->flags & IORING_SETUP_CQSIZE) {
9666 /*
9667 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9668 * to a power-of-two, if it isn't already. We do NOT impose
9669 * any cq vs sq ring sizing.
9670 */
eb2667b3 9671 if (!p->cq_entries)
33a107f0 9672 return -EINVAL;
8110c1a6
JA
9673 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9674 if (!(p->flags & IORING_SETUP_CLAMP))
9675 return -EINVAL;
9676 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9677 }
eb2667b3
JQ
9678 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9679 if (p->cq_entries < p->sq_entries)
9680 return -EINVAL;
33a107f0
JA
9681 } else {
9682 p->cq_entries = 2 * p->sq_entries;
9683 }
2b188cc1 9684
2b188cc1 9685 ctx = io_ring_ctx_alloc(p);
62e398be 9686 if (!ctx)
2b188cc1 9687 return -ENOMEM;
2b188cc1 9688 ctx->compat = in_compat_syscall();
62e398be
JA
9689 if (!capable(CAP_IPC_LOCK))
9690 ctx->user = get_uid(current_user());
2aede0e4
JA
9691
9692 /*
9693 * This is just grabbed for accounting purposes. When a process exits,
9694 * the mm is exited and dropped before the files, hence we need to hang
9695 * on to this mm purely for the purposes of being able to unaccount
9696 * memory (locked/pinned vm). It's not used for anything else.
9697 */
6b7898eb 9698 mmgrab(current->mm);
2aede0e4 9699 ctx->mm_account = current->mm;
6b7898eb 9700
2b188cc1
JA
9701 ret = io_allocate_scq_urings(ctx, p);
9702 if (ret)
9703 goto err;
9704
7e84e1c7 9705 ret = io_sq_offload_create(ctx, p);
2b188cc1
JA
9706 if (ret)
9707 goto err;
eae071c9 9708 /* always set a rsrc node */
47b228ce
PB
9709 ret = io_rsrc_node_switch_start(ctx);
9710 if (ret)
9711 goto err;
eae071c9 9712 io_rsrc_node_switch(ctx, NULL);
2b188cc1 9713
2b188cc1 9714 memset(&p->sq_off, 0, sizeof(p->sq_off));
75b28aff
HV
9715 p->sq_off.head = offsetof(struct io_rings, sq.head);
9716 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9717 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9718 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9719 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9720 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9721 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
2b188cc1
JA
9722
9723 memset(&p->cq_off, 0, sizeof(p->cq_off));
75b28aff
HV
9724 p->cq_off.head = offsetof(struct io_rings, cq.head);
9725 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9726 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9727 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9728 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9729 p->cq_off.cqes = offsetof(struct io_rings, cqes);
0d9b5b3a 9730 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
ac90f249 9731
7f13657d
XW
9732 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9733 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
5769a351 9734 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
c73ebb68 9735 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
9690557e
PB
9736 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
9737 IORING_FEAT_RSRC_TAGS;
7f13657d
XW
9738
9739 if (copy_to_user(params, p, sizeof(*p))) {
9740 ret = -EFAULT;
9741 goto err;
9742 }
d1719f70 9743
9faadcc8
PB
9744 file = io_uring_get_file(ctx);
9745 if (IS_ERR(file)) {
9746 ret = PTR_ERR(file);
9747 goto err;
9748 }
9749
044c1ab3
JA
9750 /*
9751 * Install ring fd as the very last thing, so we don't risk someone
9752 * having closed it before we finish setup
9753 */
9faadcc8
PB
9754 ret = io_uring_install_fd(ctx, file);
9755 if (ret < 0) {
9756 /* fput will clean it up */
9757 fput(file);
9758 return ret;
9759 }
044c1ab3 9760
c826bd7a 9761 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
2b188cc1
JA
9762 return ret;
9763err:
9764 io_ring_ctx_wait_and_kill(ctx);
9765 return ret;
9766}
9767
9768/*
9769 * Sets up an io_uring context and returns the fd. The application asks for a
9770 * ring size; we return the actual sq/cq ring sizes (among other things) in the
9771 * params structure passed in.
9772 */
9773static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9774{
9775 struct io_uring_params p;
2b188cc1
JA
9776 int i;
9777
9778 if (copy_from_user(&p, params, sizeof(p)))
9779 return -EFAULT;
9780 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9781 if (p.resv[i])
9782 return -EINVAL;
9783 }
9784
6c271ce2 9785 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
8110c1a6 9786 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
7e84e1c7
SG
9787 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9788 IORING_SETUP_R_DISABLED))
2b188cc1
JA
9789 return -EINVAL;
9790
7f13657d 9791 return io_uring_create(entries, &p, params);
2b188cc1
JA
9792}
9793
9794SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9795 struct io_uring_params __user *, params)
9796{
9797 return io_uring_setup(entries, params);
9798}
9799
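/*
 * Illustrative userspace sketch (hypothetical helper, not part of this file):
 * create a ring with an oversized CQ via IORING_SETUP_CQSIZE, as handled in
 * io_uring_create() above.  Both entry counts are rounded up to powers of
 * two and the actual sizes are written back into the params structure.
 */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int setup_ring_big_cq(unsigned sq_entries, unsigned cq_entries,
                             struct io_uring_params *p)
{
        memset(p, 0, sizeof(*p));
        p->flags = IORING_SETUP_CQSIZE;
        p->cq_entries = cq_entries;     /* must be >= sq_entries after rounding */

        /* on success p->sq_entries/p->cq_entries hold the rounded-up sizes */
        return syscall(__NR_io_uring_setup, sq_entries, p);
}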
66f4af93
JA
9800static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9801{
9802 struct io_uring_probe *p;
9803 size_t size;
9804 int i, ret;
9805
9806 size = struct_size(p, ops, nr_args);
9807 if (size == SIZE_MAX)
9808 return -EOVERFLOW;
9809 p = kzalloc(size, GFP_KERNEL);
9810 if (!p)
9811 return -ENOMEM;
9812
9813 ret = -EFAULT;
9814 if (copy_from_user(p, arg, size))
9815 goto out;
9816 ret = -EINVAL;
9817 if (memchr_inv(p, 0, size))
9818 goto out;
9819
9820 p->last_op = IORING_OP_LAST - 1;
9821 if (nr_args > IORING_OP_LAST)
9822 nr_args = IORING_OP_LAST;
9823
9824 for (i = 0; i < nr_args; i++) {
9825 p->ops[i].op = i;
9826 if (!io_op_defs[i].not_supported)
9827 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9828 }
9829 p->ops_len = i;
9830
9831 ret = 0;
9832 if (copy_to_user(arg, p, size))
9833 ret = -EFAULT;
9834out:
9835 kfree(p);
9836 return ret;
9837}
9838
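/*
 * Illustrative userspace sketch (hypothetical helper, not part of this file):
 * ask the kernel which opcodes it supports via IORING_REGISTER_PROBE,
 * mirroring io_probe() above.  The probe buffer must be zero-filled, which
 * calloc() guarantees.  Returns 1 if @op is supported, 0 if not, -1 on error.
 */
#include <linux/io_uring.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

static int opcode_supported(int ring_fd, int op)
{
        size_t len = sizeof(struct io_uring_probe) +
                     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
        struct io_uring_probe *probe = calloc(1, len);
        int ret = -1;

        if (!probe)
                return -1;
        if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
                    probe, IORING_OP_LAST) >= 0)
                ret = (op <= probe->last_op) &&
                      (probe->ops[op].flags & IO_URING_OP_SUPPORTED);
        free(probe);
        return ret;
}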
071698e1
JA
9839static int io_register_personality(struct io_ring_ctx *ctx)
9840{
4379bf8b 9841 const struct cred *creds;
61cf9370 9842 u32 id;
1e6fa521 9843 int ret;
071698e1 9844
4379bf8b 9845 creds = get_current_cred();
1e6fa521 9846
61cf9370
MWO
9847 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
9848 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
a30f895a
JA
9849 if (ret < 0) {
9850 put_cred(creds);
9851 return ret;
9852 }
9853 return id;
071698e1
JA
9854}
9855
21b55dbc
SG
9856static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9857 unsigned int nr_args)
9858{
9859 struct io_uring_restriction *res;
9860 size_t size;
9861 int i, ret;
9862
7e84e1c7
SG
9863 /* Restrictions allowed only if rings started disabled */
9864 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9865 return -EBADFD;
9866
21b55dbc 9867 /* We allow only a single restrictions registration */
7e84e1c7 9868 if (ctx->restrictions.registered)
21b55dbc
SG
9869 return -EBUSY;
9870
9871 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9872 return -EINVAL;
9873
9874 size = array_size(nr_args, sizeof(*res));
9875 if (size == SIZE_MAX)
9876 return -EOVERFLOW;
9877
9878 res = memdup_user(arg, size);
9879 if (IS_ERR(res))
9880 return PTR_ERR(res);
9881
9882 ret = 0;
9883
9884 for (i = 0; i < nr_args; i++) {
9885 switch (res[i].opcode) {
9886 case IORING_RESTRICTION_REGISTER_OP:
9887 if (res[i].register_op >= IORING_REGISTER_LAST) {
9888 ret = -EINVAL;
9889 goto out;
9890 }
9891
9892 __set_bit(res[i].register_op,
9893 ctx->restrictions.register_op);
9894 break;
9895 case IORING_RESTRICTION_SQE_OP:
9896 if (res[i].sqe_op >= IORING_OP_LAST) {
9897 ret = -EINVAL;
9898 goto out;
9899 }
9900
9901 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9902 break;
9903 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9904 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9905 break;
9906 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9907 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9908 break;
9909 default:
9910 ret = -EINVAL;
9911 goto out;
9912 }
9913 }
9914
9915out:
9916 /* Reset all restrictions if an error happened */
9917 if (ret != 0)
9918 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9919 else
7e84e1c7 9920 ctx->restrictions.registered = true;
21b55dbc
SG
9921
9922 kfree(res);
9923 return ret;
9924}
9925
7e84e1c7
SG
9926static int io_register_enable_rings(struct io_ring_ctx *ctx)
9927{
9928 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9929 return -EBADFD;
9930
9931 if (ctx->restrictions.registered)
9932 ctx->restricted = 1;
9933
0298ef96
PB
9934 ctx->flags &= ~IORING_SETUP_R_DISABLED;
9935 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
9936 wake_up(&ctx->sq_data->wait);
7e84e1c7
SG
9937 return 0;
9938}
9939
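/*
 * Illustrative userspace sketch (hypothetical helper, not part of this file):
 * a supervisor creates a ring with IORING_SETUP_R_DISABLED, restricts it to
 * read/write SQEs, then enables it, following io_register_restrictions() and
 * io_register_enable_rings() above.  Restrictions can be registered only
 * while the ring is still disabled, and only once.
 */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int restrict_to_rw(int ring_fd)
{
        struct io_uring_restriction res[2];

        memset(res, 0, sizeof(res));
        res[0].opcode = IORING_RESTRICTION_SQE_OP;
        res[0].sqe_op = IORING_OP_READ;
        res[1].opcode = IORING_RESTRICTION_SQE_OP;
        res[1].sqe_op = IORING_OP_WRITE;

        if (syscall(__NR_io_uring_register, ring_fd,
                    IORING_REGISTER_RESTRICTIONS, res, 2) < 0)
                return -1;
        /* lift IORING_SETUP_R_DISABLED so submissions may start */
        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_ENABLE_RINGS, NULL, 0);
}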
fdecb662 9940static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
c3bdad02 9941 struct io_uring_rsrc_update2 *up,
98f0b3b4
PB
9942 unsigned nr_args)
9943{
9944 __u32 tmp;
9945 int err;
9946
c3bdad02
PB
9947 if (up->resv)
9948 return -EINVAL;
98f0b3b4
PB
9949 if (check_add_overflow(up->offset, nr_args, &tmp))
9950 return -EOVERFLOW;
9951 err = io_rsrc_node_switch_start(ctx);
9952 if (err)
9953 return err;
9954
fdecb662
PB
9955 switch (type) {
9956 case IORING_RSRC_FILE:
98f0b3b4 9957 return __io_sqe_files_update(ctx, up, nr_args);
634d00df
PB
9958 case IORING_RSRC_BUFFER:
9959 return __io_sqe_buffers_update(ctx, up, nr_args);
98f0b3b4
PB
9960 }
9961 return -EINVAL;
9962}
9963
c3bdad02
PB
9964static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
9965 unsigned nr_args)
98f0b3b4 9966{
c3bdad02 9967 struct io_uring_rsrc_update2 up;
98f0b3b4
PB
9968
9969 if (!nr_args)
9970 return -EINVAL;
c3bdad02
PB
9971 memset(&up, 0, sizeof(up));
9972 if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
9973 return -EFAULT;
9974 return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
9975}
9976
9977static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
992da01a 9978 unsigned size, unsigned type)
c3bdad02
PB
9979{
9980 struct io_uring_rsrc_update2 up;
9981
9982 if (size != sizeof(up))
9983 return -EINVAL;
98f0b3b4
PB
9984 if (copy_from_user(&up, arg, sizeof(up)))
9985 return -EFAULT;
992da01a 9986 if (!up.nr || up.resv)
98f0b3b4 9987 return -EINVAL;
992da01a 9988 return __io_register_rsrc_update(ctx, type, &up, up.nr);
98f0b3b4
PB
9989}
9990
792e3582 9991static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
992da01a 9992 unsigned int size, unsigned int type)
792e3582
PB
9993{
9994 struct io_uring_rsrc_register rr;
9995
9996 /* keep it extendible */
9997 if (size != sizeof(rr))
9998 return -EINVAL;
9999
10000 memset(&rr, 0, sizeof(rr));
10001 if (copy_from_user(&rr, arg, size))
10002 return -EFAULT;
992da01a 10003 if (!rr.nr || rr.resv || rr.resv2)
792e3582
PB
10004 return -EINVAL;
10005
992da01a 10006 switch (type) {
792e3582
PB
10007 case IORING_RSRC_FILE:
10008 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
10009 rr.nr, u64_to_user_ptr(rr.tags));
634d00df
PB
10010 case IORING_RSRC_BUFFER:
10011 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
10012 rr.nr, u64_to_user_ptr(rr.tags));
792e3582
PB
10013 }
10014 return -EINVAL;
10015}
10016
fe76421d
JA
10017static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg,
10018 unsigned len)
10019{
10020 struct io_uring_task *tctx = current->io_uring;
10021 cpumask_var_t new_mask;
10022 int ret;
10023
10024 if (!tctx || !tctx->io_wq)
10025 return -EINVAL;
10026
10027 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
10028 return -ENOMEM;
10029
10030 cpumask_clear(new_mask);
10031 if (len > cpumask_size())
10032 len = cpumask_size();
10033
10034 if (copy_from_user(new_mask, arg, len)) {
10035 free_cpumask_var(new_mask);
10036 return -EFAULT;
10037 }
10038
10039 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
10040 free_cpumask_var(new_mask);
10041 return ret;
10042}
10043
10044static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
10045{
10046 struct io_uring_task *tctx = current->io_uring;
10047
10048 if (!tctx || !tctx->io_wq)
10049 return -EINVAL;
10050
10051 return io_wq_cpu_affinity(tctx->io_wq, NULL);
10052}
10053
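/*
 * Illustrative userspace sketch (hypothetical helper, not part of this file):
 * pin this task's io-wq workers to CPU 0 with IORING_REGISTER_IOWQ_AFF,
 * matching io_register_iowq_aff() above.  The mask is copied as raw bytes,
 * so a cpu_set_t works as the argument; nr_args carries the mask size.
 */
#define _GNU_SOURCE
#include <linux/io_uring.h>
#include <sched.h>
#include <sys/syscall.h>
#include <unistd.h>

static int pin_iowq_to_cpu0(int ring_fd)
{
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(0, &mask);
        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_IOWQ_AFF, &mask, sizeof(mask));
}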
071698e1
JA
10054static bool io_register_op_must_quiesce(int op)
10055{
10056 switch (op) {
bd54b6fe
BM
10057 case IORING_REGISTER_BUFFERS:
10058 case IORING_UNREGISTER_BUFFERS:
f4f7d21c 10059 case IORING_REGISTER_FILES:
071698e1
JA
10060 case IORING_UNREGISTER_FILES:
10061 case IORING_REGISTER_FILES_UPDATE:
10062 case IORING_REGISTER_PROBE:
10063 case IORING_REGISTER_PERSONALITY:
10064 case IORING_UNREGISTER_PERSONALITY:
992da01a
PB
10065 case IORING_REGISTER_FILES2:
10066 case IORING_REGISTER_FILES_UPDATE2:
10067 case IORING_REGISTER_BUFFERS2:
10068 case IORING_REGISTER_BUFFERS_UPDATE:
fe76421d
JA
10069 case IORING_REGISTER_IOWQ_AFF:
10070 case IORING_UNREGISTER_IOWQ_AFF:
071698e1
JA
10071 return false;
10072 default:
10073 return true;
10074 }
10075}
10076
e73c5c7c
PB
10077static int io_ctx_quiesce(struct io_ring_ctx *ctx)
10078{
10079 long ret;
10080
10081 percpu_ref_kill(&ctx->refs);
10082
10083 /*
10084 * Drop uring mutex before waiting for references to exit. If another
10085 * thread is currently inside io_uring_enter() it might need to grab the
10086 * uring_lock to make progress. If we hold it here across the drain
10087 * wait, then we can deadlock. It's safe to drop the mutex here, since
10088 * no new references will come in after we've killed the percpu ref.
10089 */
10090 mutex_unlock(&ctx->uring_lock);
10091 do {
10092 ret = wait_for_completion_interruptible(&ctx->ref_comp);
10093 if (!ret)
10094 break;
10095 ret = io_run_task_work_sig();
10096 } while (ret >= 0);
10097 mutex_lock(&ctx->uring_lock);
10098
10099 if (ret)
10100 io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
10101 return ret;
10102}
10103
edafccee
JA
10104static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
10105 void __user *arg, unsigned nr_args)
b19062a5
JA
10106 __releases(ctx->uring_lock)
10107 __acquires(ctx->uring_lock)
edafccee
JA
10108{
10109 int ret;
10110
35fa71a0
JA
10111 /*
10112	 * We're inside the ring mutex; if the ref is already dying, then
10113 * someone else killed the ctx or is already going through
10114 * io_uring_register().
10115 */
10116 if (percpu_ref_is_dying(&ctx->refs))
10117 return -ENXIO;
10118
75c4021a
PB
10119 if (ctx->restricted) {
10120 if (opcode >= IORING_REGISTER_LAST)
10121 return -EINVAL;
10122 opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
10123 if (!test_bit(opcode, ctx->restrictions.register_op))
10124 return -EACCES;
10125 }
10126
071698e1 10127 if (io_register_op_must_quiesce(opcode)) {
e73c5c7c
PB
10128 ret = io_ctx_quiesce(ctx);
10129 if (ret)
f70865db 10130 return ret;
05f3fb3c 10131 }
edafccee
JA
10132
10133 switch (opcode) {
10134 case IORING_REGISTER_BUFFERS:
634d00df 10135 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
edafccee
JA
10136 break;
10137 case IORING_UNREGISTER_BUFFERS:
10138 ret = -EINVAL;
10139 if (arg || nr_args)
10140 break;
0a96bbe4 10141 ret = io_sqe_buffers_unregister(ctx);
edafccee 10142 break;
6b06314c 10143 case IORING_REGISTER_FILES:
792e3582 10144 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
6b06314c
JA
10145 break;
10146 case IORING_UNREGISTER_FILES:
10147 ret = -EINVAL;
10148 if (arg || nr_args)
10149 break;
10150 ret = io_sqe_files_unregister(ctx);
10151 break;
c3a31e60 10152 case IORING_REGISTER_FILES_UPDATE:
c3bdad02 10153 ret = io_register_files_update(ctx, arg, nr_args);
c3a31e60 10154 break;
9b402849 10155 case IORING_REGISTER_EVENTFD:
f2842ab5 10156 case IORING_REGISTER_EVENTFD_ASYNC:
9b402849
JA
10157 ret = -EINVAL;
10158 if (nr_args != 1)
10159 break;
10160 ret = io_eventfd_register(ctx, arg);
f2842ab5
JA
10161 if (ret)
10162 break;
10163 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
10164 ctx->eventfd_async = 1;
10165 else
10166 ctx->eventfd_async = 0;
9b402849
JA
10167 break;
10168 case IORING_UNREGISTER_EVENTFD:
10169 ret = -EINVAL;
10170 if (arg || nr_args)
10171 break;
10172 ret = io_eventfd_unregister(ctx);
10173 break;
66f4af93
JA
10174 case IORING_REGISTER_PROBE:
10175 ret = -EINVAL;
10176 if (!arg || nr_args > 256)
10177 break;
10178 ret = io_probe(ctx, arg, nr_args);
10179 break;
071698e1
JA
10180 case IORING_REGISTER_PERSONALITY:
10181 ret = -EINVAL;
10182 if (arg || nr_args)
10183 break;
10184 ret = io_register_personality(ctx);
10185 break;
10186 case IORING_UNREGISTER_PERSONALITY:
10187 ret = -EINVAL;
10188 if (arg)
10189 break;
10190 ret = io_unregister_personality(ctx, nr_args);
10191 break;
7e84e1c7
SG
10192 case IORING_REGISTER_ENABLE_RINGS:
10193 ret = -EINVAL;
10194 if (arg || nr_args)
10195 break;
10196 ret = io_register_enable_rings(ctx);
10197 break;
21b55dbc
SG
10198 case IORING_REGISTER_RESTRICTIONS:
10199 ret = io_register_restrictions(ctx, arg, nr_args);
10200 break;
992da01a
PB
10201 case IORING_REGISTER_FILES2:
10202 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
10203 break;
10204 case IORING_REGISTER_FILES_UPDATE2:
10205 ret = io_register_rsrc_update(ctx, arg, nr_args,
10206 IORING_RSRC_FILE);
10207 break;
10208 case IORING_REGISTER_BUFFERS2:
10209 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
792e3582 10210 break;
992da01a
PB
10211 case IORING_REGISTER_BUFFERS_UPDATE:
10212 ret = io_register_rsrc_update(ctx, arg, nr_args,
10213 IORING_RSRC_BUFFER);
c3bdad02 10214 break;
fe76421d
JA
10215 case IORING_REGISTER_IOWQ_AFF:
10216 ret = -EINVAL;
10217 if (!arg || !nr_args)
10218 break;
10219 ret = io_register_iowq_aff(ctx, arg, nr_args);
10220 break;
10221 case IORING_UNREGISTER_IOWQ_AFF:
10222 ret = -EINVAL;
10223 if (arg || nr_args)
10224 break;
10225 ret = io_unregister_iowq_aff(ctx);
10226 break;
edafccee
JA
10227 default:
10228 ret = -EINVAL;
10229 break;
10230 }
10231
071698e1 10232 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 10233 /* bring the ctx back to life */
05f3fb3c 10234 percpu_ref_reinit(&ctx->refs);
0f158b4c 10235 reinit_completion(&ctx->ref_comp);
05f3fb3c 10236 }
edafccee
JA
10237 return ret;
10238}
10239
10240SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
10241 void __user *, arg, unsigned int, nr_args)
10242{
10243 struct io_ring_ctx *ctx;
10244 long ret = -EBADF;
10245 struct fd f;
10246
10247 f = fdget(fd);
10248 if (!f.file)
10249 return -EBADF;
10250
10251 ret = -EOPNOTSUPP;
10252 if (f.file->f_op != &io_uring_fops)
10253 goto out_fput;
10254
10255 ctx = f.file->private_data;
10256
b6c23dd5
PB
10257 io_run_task_work();
10258
edafccee
JA
10259 mutex_lock(&ctx->uring_lock);
10260 ret = __io_uring_register(ctx, opcode, arg, nr_args);
10261 mutex_unlock(&ctx->uring_lock);
c826bd7a
DD
10262 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
10263 ctx->cq_ev_fd != NULL, ret);
edafccee
JA
10264out_fput:
10265 fdput(f);
10266 return ret;
10267}
10268
2b188cc1
JA
10269static int __init io_uring_init(void)
10270{
d7f62e82
SM
10271#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
10272 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
10273 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
10274} while (0)
10275
10276#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
10277 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
10278 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
10279 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
10280 BUILD_BUG_SQE_ELEM(1, __u8, flags);
10281 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
10282 BUILD_BUG_SQE_ELEM(4, __s32, fd);
10283 BUILD_BUG_SQE_ELEM(8, __u64, off);
10284 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
10285 BUILD_BUG_SQE_ELEM(16, __u64, addr);
7d67af2c 10286 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
d7f62e82
SM
10287 BUILD_BUG_SQE_ELEM(24, __u32, len);
10288 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
10289 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
10290 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
10291 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
5769a351
JX
10292 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
10293 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
d7f62e82
SM
10294 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
10295 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
10296 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
10297 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
10298 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
10299 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
10300 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
10301 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
7d67af2c 10302 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
d7f62e82
SM
10303 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
10304 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
16340eab 10305 BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
d7f62e82 10306 BUILD_BUG_SQE_ELEM(42, __u16, personality);
7d67af2c 10307 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
d7f62e82 10308
b0d658ec
PB
10309 BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
10310 sizeof(struct io_uring_rsrc_update));
10311 BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
10312 sizeof(struct io_uring_rsrc_update2));
10313 /* should fit into one byte */
10314 BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
10315
d3656344 10316 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
84557871 10317 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
16340eab 10318
91f245d5
JA
10319 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
10320 SLAB_ACCOUNT);
2b188cc1
JA
10321 return 0;
10322};
10323__initcall(io_uring_init);