io_uring: use TWA_SIGNAL_NO_IPI if IORING_SETUP_COOP_TASKRUN is used
fs/io_uring.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
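/*
 * A minimal userspace-side sketch of the CQ contract described above,
 * guarded out since it is illustrative only and not part of this file.
 * The khead/ktail/ring_mask/cqes fields are assumed to have been set up
 * from the IORING_OFF_CQ_RING mmap using struct io_cqring_offsets, the
 * GCC __atomic builtins stand in for smp_load_acquire()/smp_store_release(),
 * and consume_cqe() is a hypothetical application callback.
 */
#if 0
struct app_cq {
	unsigned *khead;		/* CQ head, written by the application */
	unsigned *ktail;		/* CQ tail, written by the kernel */
	unsigned ring_mask;		/* cq_ring_mask, constant */
	struct io_uring_cqe *cqes;	/* CQE array in the CQ ring mmap */
};

/* Reap all currently posted CQEs, returning how many were consumed. */
static unsigned app_reap_cqes(struct app_cq *cq)
{
	unsigned head = *cq->khead;
	/* acquire pairs with the kernel's store-release of the tail */
	unsigned tail = __atomic_load_n(cq->ktail, __ATOMIC_ACQUIRE);
	unsigned count = 0;

	while (head != tail) {
		struct io_uring_cqe *cqe = &cq->cqes[head & cq->ring_mask];

		consume_cqe(cqe);	/* hypothetical: process the completion */
		head++;
		count++;
	}
	/* release orders the CQE loads before publishing the new head */
	__atomic_store_n(cq->khead, head, __ATOMIC_RELEASE);
	return count;
}
#endif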
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/audit.h>
#include <linux/security.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 15)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define IO_RSRC_TAG_TABLE_SHIFT	(PAGE_SHIFT - 3)
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

#define IORING_MAX_REG_BUFFERS	(1U << 14)

#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
			  IOSQE_IO_HARDLINK | IOSQE_ASYNC)

#define SQE_VALID_FLAGS	(SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
			 IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)

#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
				REQ_F_POLLED | REQ_F_CREDS | REQ_F_ASYNC_DATA)

#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
				 IO_REQ_CLEAN_FLAGS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t		sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
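/*
 * The SQ side of the same shared-ring contract, again as an illustrative
 * sketch only (guarded out, not part of this file).  The fields are
 * assumed to come from the IORING_OFF_SQ_RING and IORING_OFF_SQES mmaps
 * via struct io_sqring_offsets, and prepare_sqe() is a hypothetical
 * helper that fills in the io_uring_sqe.
 */
#if 0
struct app_sq {
	unsigned *ktail;		/* SQ tail, written by the application */
	unsigned ring_mask;		/* sq_ring_mask, constant */
	unsigned *array;		/* indirection array into sqes[] */
	struct io_uring_sqe *sqes;	/* SQE array from IORING_OFF_SQES */
	unsigned sqe_tail;		/* application-local tail shadow */
};

/* Queue one SQE; the kernel may consume it once the new tail is visible. */
static void app_submit_one(struct app_sq *sq)
{
	unsigned tail = sq->sqe_tail;
	unsigned idx = tail & sq->ring_mask;

	prepare_sqe(&sq->sqes[idx]);	/* hypothetical: fill in the SQE */
	sq->array[idx] = idx;		/* publish the SQE index */
	sq->sqe_tail = tail + 1;
	/* release orders the SQE and array stores before the tail update */
	__atomic_store_n(sq->ktail, tail + 1, __ATOMIC_RELEASE);
}
#endif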

enum io_uring_cmd_flags {
	IO_URING_F_COMPLETE_DEFER	= 1,
	IO_URING_F_UNLOCKED		= 2,
	/* int's last bit, sign checks are usually faster than a bit test */
	IO_URING_F_NONBLOCK		= INT_MIN,
};
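/*
 * A small illustration (guarded out, not compiled) of the comment above:
 * because IO_URING_F_NONBLOCK occupies the sign bit of an int, testing it
 * can be written as a sign check rather than masking with a constant.
 */
#if 0
static bool is_nonblock(int issue_flags)
{
	/* equivalent to (issue_flags & IO_URING_F_NONBLOCK) != 0 */
	return issue_flags < 0;
}
#endif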

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[];
};

struct io_ring_ctx;

struct io_overflow_cqe {
	struct io_uring_cqe cqe;
	struct list_head list;
};

/*
 * FFS_SCM is only available on 64-bit archs, for 32-bit we just define it as 0
 * and define IO_URING_SCM_ALL. For this case, we use SCM for all files as we
 * can't safely always dereference the file when the task has exited and ring
 * cleanup is done. If a file is tracked and part of SCM, then unix gc on
 * process exit may reap it before __io_sqe_files_unregister() is run.
 */
#define FFS_NOWAIT		0x1UL
#define FFS_ISREG		0x2UL
#if defined(CONFIG_64BIT)
#define FFS_SCM			0x4UL
#else
#define IO_URING_SCM_ALL
#define FFS_SCM			0x0UL
#endif
#define FFS_MASK		~(FFS_NOWAIT|FFS_ISREG|FFS_SCM)

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};
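/*
 * An illustrative sketch (guarded out, not part of this file) of the
 * pointer tagging used by io_fixed_file: allocation alignment leaves the
 * low bits of a struct file pointer unused, so they can carry the FFS_*
 * flags, and masking with FFS_MASK recovers the pointer.  The helper
 * names below are hypothetical.
 */
#if 0
static unsigned long ffs_pack(struct file *file, unsigned long flags)
{
	/* flags must lie within the low tag bits, i.e. outside FFS_MASK */
	return (unsigned long)file | flags;
}

static struct file *ffs_unpack(unsigned long file_ptr)
{
	return (struct file *)(file_ptr & FFS_MASK);
}
#endif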

struct io_rsrc_put {
	struct list_head list;
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

struct io_file_table {
	struct io_fixed_file *files;
};

struct io_rsrc_node {
	struct percpu_ref		refs;
	struct list_head		node;
	struct list_head		rsrc_list;
	struct io_rsrc_data		*rsrc_data;
	struct llist_node		llist;
	bool				done;
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx		*ctx;

	u64				**tags;
	unsigned int			nr;
	rsrc_put_fn			*do_put;
	atomic_t			refs;
	struct completion		done;
	bool				quiesce;
};

struct io_buffer_list {
	struct list_head list;
	struct list_head buf_list;
	__u16 bgid;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t		refs;
	atomic_t		park_pending;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
	int			sq_cpu;
	pid_t			task_pid;
	pid_t			task_tgid;

	unsigned long		state;
	struct completion	exited;
};

#define IO_COMPL_BATCH			32
#define IO_REQ_CACHE_SIZE		32
#define IO_REQ_ALLOC_BATCH		8

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node	free_list;
	/* batch completion logic */
	struct io_wq_work_list	compl_reqs;
	struct io_submit_link	link;

	bool			plug_started;
	bool			need_plug;
	bool			flush_cqes;
	unsigned short		submit_nr;
	struct blk_plug		plug;
};

struct io_ev_fd {
	struct eventfd_ctx	*cq_ev_fd;
	unsigned int		eventfd_async: 1;
	struct rcu_head		rcu;
};

#define IO_BUFFERS_HASH_BITS	5

2b188cc1 362struct io_ring_ctx {
b52ecf8c 363 /* const or read-mostly hot data */
2b188cc1
JA
364 struct {
365 struct percpu_ref refs;
2b188cc1 366
b52ecf8c 367 struct io_rings *rings;
2b188cc1 368 unsigned int flags;
9f010507 369 enum task_work_notify_mode notify_method;
e1d85334 370 unsigned int compat: 1;
e1d85334 371 unsigned int drain_next: 1;
21b55dbc 372 unsigned int restricted: 1;
f18ee4cf 373 unsigned int off_timeout_used: 1;
10c66904 374 unsigned int drain_active: 1;
5562a8d7 375 unsigned int drain_disabled: 1;
9aa8dfde 376 unsigned int has_evfd: 1;
773697b6 377 unsigned int syscall_iopoll: 1;
b52ecf8c 378 } ____cacheline_aligned_in_smp;
2b188cc1 379
7f1129d2 380 /* submission data */
b52ecf8c 381 struct {
0499e582
PB
382 struct mutex uring_lock;
383
75b28aff
HV
384 /*
385 * Ring buffer of indices into array of io_uring_sqe, which is
386 * mmapped by the application using the IORING_OFF_SQES offset.
387 *
388 * This indirection could e.g. be used to assign fixed
389 * io_uring_sqe entries to operations and only submit them to
390 * the queue when needed.
391 *
392 * The kernel modifies neither the indices array nor the entries
393 * array.
394 */
395 u32 *sq_array;
c7af47cf 396 struct io_uring_sqe *sq_sqes;
2b188cc1
JA
397 unsigned cached_sq_head;
398 unsigned sq_entries;
de0617e4 399 struct list_head defer_list;
7f1129d2
PB
400
401 /*
402 * Fixed resources fast path, should be accessed only under
403 * uring_lock, and updated through io_uring_register(2)
404 */
405 struct io_rsrc_node *rsrc_node;
ab409402 406 int rsrc_cached_refs;
8e29da69 407 atomic_t cancel_seq;
7f1129d2
PB
408 struct io_file_table file_table;
409 unsigned nr_user_files;
410 unsigned nr_user_bufs;
411 struct io_mapped_ubuf **user_bufs;
412
413 struct io_submit_state submit_state;
5262f567 414 struct list_head timeout_list;
ef9dd637 415 struct list_head ltimeout_list;
1d7bb1d5 416 struct list_head cq_overflow_list;
dbc7d452 417 struct list_head *io_buffers;
cc3cec83 418 struct list_head io_buffers_cache;
4d9237e3 419 struct list_head apoll_cache;
7f1129d2
PB
420 struct xarray personalities;
421 u32 pers_next;
422 unsigned sq_thread_idle;
2b188cc1
JA
423 } ____cacheline_aligned_in_smp;
424
d0acdee2 425 /* IRQ completion list, under ->completion_lock */
c2b6c6bc 426 struct io_wq_work_list locked_free_list;
d0acdee2 427 unsigned int locked_free_nr;
3c1a2ead 428
7c30f36a 429 const struct cred *sq_creds; /* cred used for __io_sq_thread() */
534ca6d6
JA
430 struct io_sq_data *sq_data; /* if using sq thread polling */
431
90554200 432 struct wait_queue_head sqo_sq_wait;
69fb2131 433 struct list_head sqd_list;
75b28aff 434
10988a0a 435 unsigned long check_cq;
5ed7a37d 436
206aefde 437 struct {
d8da428b
PB
438 /*
439 * We cache a range of free CQEs we can use, once exhausted it
440 * should go through a slower range setup, see __io_get_cqe()
441 */
442 struct io_uring_cqe *cqe_cached;
443 struct io_uring_cqe *cqe_sentinel;
444
206aefde
JA
445 unsigned cached_cq_tail;
446 unsigned cq_entries;
77bc59b4 447 struct io_ev_fd __rcu *io_ev_fd;
0499e582
PB
448 struct wait_queue_head cq_wait;
449 unsigned cq_extra;
450 atomic_t cq_timeouts;
0499e582 451 unsigned cq_last_tm_flush;
206aefde 452 } ____cacheline_aligned_in_smp;
2b188cc1 453
2b188cc1
JA
454 struct {
455 spinlock_t completion_lock;
e94f141b 456
89850fce
JA
457 spinlock_t timeout_lock;
458
def596e9 459 /*
540e32a0 460 * ->iopoll_list is protected by the ctx->uring_lock for
def596e9
JA
461 * io_uring instances that don't use IORING_SETUP_SQPOLL.
462 * For SQPOLL, only the single threaded io_sq_thread() will
463 * manipulate the list, hence no extra locking is needed there.
464 */
5eef4e87 465 struct io_wq_work_list iopoll_list;
78076bb6
JA
466 struct hlist_head *cancel_hash;
467 unsigned cancel_hash_bits;
915b3dde 468 bool poll_multi_queue;
cc3cec83
JA
469
470 struct list_head io_buffers_comp;
2b188cc1 471 } ____cacheline_aligned_in_smp;
85faa7b8 472
21b55dbc 473 struct io_restriction restrictions;
3c1a2ead 474
b13a8918
PB
475 /* slow path rsrc auxilary data, used by update/register */
476 struct {
477 struct io_rsrc_node *rsrc_backup_node;
478 struct io_mapped_ubuf *dummy_ubuf;
479 struct io_rsrc_data *file_data;
480 struct io_rsrc_data *buf_data;
481
482 struct delayed_work rsrc_put_work;
483 struct llist_head rsrc_put_llist;
484 struct list_head rsrc_ref_list;
485 spinlock_t rsrc_ref_lock;
cc3cec83
JA
486
487 struct list_head io_buffers_pages;
b13a8918
PB
488 };
489
3c1a2ead 490 /* Keep this last, we don't need it for the fast path */
b986af7e
PB
491 struct {
492 #if defined(CONFIG_UNIX)
493 struct socket *ring_sock;
494 #endif
495 /* hashed buffered write serialization */
496 struct io_wq_hash *hash_map;
497
498 /* Only used for accounting purposes */
499 struct user_struct *user;
500 struct mm_struct *mm_account;
501
502 /* ctx exit and cancelation */
9011bf9a
PB
503 struct llist_head fallback_llist;
504 struct delayed_work fallback_work;
b986af7e
PB
505 struct work_struct exit_work;
506 struct list_head tctx_list;
507 struct completion ref_comp;
e139a1ec
PB
508 u32 iowq_limits[2];
509 bool iowq_limits_set;
b986af7e 510 };
2b188cc1
JA
511};
512
e7a6c00d
JA
513/*
514 * Arbitrary limit, can be raised if need be
515 */
516#define IO_RINGFD_REG_MAX 16
517
53e043b2
SM
518struct io_uring_task {
519 /* submission side */
09899b19 520 int cached_refs;
53e043b2
SM
521 struct xarray xa;
522 struct wait_queue_head wait;
ee53fb2b
SM
523 const struct io_ring_ctx *last;
524 struct io_wq *io_wq;
53e043b2
SM
525 struct percpu_counter inflight;
526 atomic_t in_idle;
53e043b2
SM
527
528 spinlock_t task_lock;
529 struct io_wq_work_list task_list;
4813c377 530 struct io_wq_work_list prior_task_list;
53e043b2 531 struct callback_head task_work;
e7a6c00d 532 struct file **registered_rings;
6294f368 533 bool task_running;
53e043b2
SM
534};
535
09bb8394
JA
536/*
537 * First field must be the file pointer in all the
538 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
539 */
221c5eb2
JA
540struct io_poll_iocb {
541 struct file *file;
018043be 542 struct wait_queue_head *head;
221c5eb2 543 __poll_t events;
392edb45 544 struct wait_queue_entry wait;
221c5eb2
JA
545};
546
9d805892 547struct io_poll_update {
018043be 548 struct file *file;
9d805892
PB
549 u64 old_user_data;
550 u64 new_user_data;
551 __poll_t events;
b69de288
JA
552 bool update_events;
553 bool update_user_data;
018043be
PB
554};
555
b5dba59e
JA
556struct io_close {
557 struct file *file;
b5dba59e 558 int fd;
7df778be 559 u32 file_slot;
b5dba59e
JA
560};
561
ad8a48ac
JA
562struct io_timeout_data {
563 struct io_kiocb *req;
564 struct hrtimer timer;
565 struct timespec64 ts;
566 enum hrtimer_mode mode;
50c1df2b 567 u32 flags;
ad8a48ac
JA
568};
569
8ed8d3c3
JA
570struct io_accept {
571 struct file *file;
572 struct sockaddr __user *addr;
573 int __user *addr_len;
574 int flags;
aaa4db12 575 u32 file_slot;
09952e3e 576 unsigned long nofile;
8ed8d3c3
JA
577};
578
579struct io_sync {
580 struct file *file;
581 loff_t len;
582 loff_t off;
583 int flags;
d63d1b5e 584 int mode;
8ed8d3c3
JA
585};
586
fbf23849
JA
587struct io_cancel {
588 struct file *file;
589 u64 addr;
8e29da69 590 u32 flags;
4bf94615 591 s32 fd;
fbf23849
JA
592};
593
b29472ee
JA
594struct io_timeout {
595 struct file *file;
bfe68a22
PB
596 u32 off;
597 u32 target_seq;
135fcde8 598 struct list_head list;
90cd7e42
PB
599 /* head of the link, used by linked timeouts only */
600 struct io_kiocb *head;
89b263f6
JA
601 /* for linked completions */
602 struct io_kiocb *prev;
b29472ee
JA
603};
604
0bdf7a2d
PB
605struct io_timeout_rem {
606 struct file *file;
607 u64 addr;
9c8e11b3
PB
608
609 /* timeout update */
610 struct timespec64 ts;
611 u32 flags;
f1042b6c 612 bool ltimeout;
0bdf7a2d
PB
613};
614
9adbd45d
JA
615struct io_rw {
616 /* NOTE: kiocb has the file as the first member, so don't do it here */
617 struct kiocb kiocb;
618 u64 addr;
584b0180
JA
619 u32 len;
620 u32 flags;
9adbd45d
JA
621};
622
3fbb51c1
JA
623struct io_connect {
624 struct file *file;
625 struct sockaddr __user *addr;
626 int addr_len;
627};
628
e47293fd
JA
629struct io_sr_msg {
630 struct file *file;
fddaface 631 union {
4af3417a
PB
632 struct compat_msghdr __user *umsg_compat;
633 struct user_msghdr __user *umsg;
634 void __user *buf;
fddaface 635 };
e47293fd 636 int msg_flags;
bcda7baa 637 int bgid;
fddaface 638 size_t len;
7ba89d2a 639 size_t done_io;
e47293fd
JA
640};
641
15b71abe
JA
642struct io_open {
643 struct file *file;
644 int dfd;
b9445598 645 u32 file_slot;
15b71abe 646 struct filename *filename;
c12cedf2 647 struct open_how how;
4022e7af 648 unsigned long nofile;
15b71abe
JA
649};
650
269bbe5f 651struct io_rsrc_update {
05f3fb3c
JA
652 struct file *file;
653 u64 arg;
654 u32 nr_args;
655 u32 offset;
656};
657
4840e418
JA
658struct io_fadvise {
659 struct file *file;
660 u64 offset;
661 u32 len;
662 u32 advice;
663};
664
c1ca757b
JA
665struct io_madvise {
666 struct file *file;
667 u64 addr;
668 u32 len;
669 u32 advice;
670};
671
3e4827b0
JA
672struct io_epoll {
673 struct file *file;
674 int epfd;
675 int op;
676 int fd;
677 struct epoll_event event;
e47293fd
JA
678};
679
7d67af2c
PB
680struct io_splice {
681 struct file *file_out;
7d67af2c
PB
682 loff_t off_out;
683 loff_t off_in;
684 u64 len;
a3e4bc23 685 int splice_fd_in;
7d67af2c
PB
686 unsigned int flags;
687};
688
ddf0322d
JA
689struct io_provide_buf {
690 struct file *file;
691 __u64 addr;
38134ada 692 __u32 len;
ddf0322d
JA
693 __u32 bgid;
694 __u16 nbufs;
695 __u16 bid;
696};
697
1d9e1288
BM
698struct io_statx {
699 struct file *file;
700 int dfd;
701 unsigned int mask;
702 unsigned int flags;
1b6fe6e0 703 struct filename *filename;
1d9e1288
BM
704 struct statx __user *buffer;
705};
706
36f4fa68
JA
707struct io_shutdown {
708 struct file *file;
709 int how;
710};
711
80a261fd
JA
712struct io_rename {
713 struct file *file;
714 int old_dfd;
715 int new_dfd;
716 struct filename *oldpath;
717 struct filename *newpath;
718 int flags;
719};
720
14a1143b
JA
721struct io_unlink {
722 struct file *file;
723 int dfd;
724 int flags;
725 struct filename *filename;
726};
727
e34a02dc
DK
728struct io_mkdir {
729 struct file *file;
730 int dfd;
731 umode_t mode;
732 struct filename *filename;
733};
734
7a8721f8
DK
735struct io_symlink {
736 struct file *file;
737 int new_dfd;
738 struct filename *oldpath;
739 struct filename *newpath;
740};
741
cf30da90
DK
742struct io_hardlink {
743 struct file *file;
744 int old_dfd;
745 int new_dfd;
746 struct filename *oldpath;
747 struct filename *newpath;
748 int flags;
749};
750
4f57f06c
JA
751struct io_msg {
752 struct file *file;
753 u64 user_data;
754 u32 len;
755};
756
f499a021
JA
757struct io_async_connect {
758 struct sockaddr_storage address;
759};
760
03b1230c
JA
761struct io_async_msghdr {
762 struct iovec fast_iov[UIO_FASTIOV];
257e84a5
PB
763 /* points to an allocated iov, if NULL we use fast_iov instead */
764 struct iovec *free_iov;
03b1230c
JA
765 struct sockaddr __user *uaddr;
766 struct msghdr msg;
b537916c 767 struct sockaddr_storage addr;
03b1230c
JA
768};
769
538941e2 770struct io_rw_state {
ff6165b2 771 struct iov_iter iter;
cd658695 772 struct iov_iter_state iter_state;
c88598a9 773 struct iovec fast_iov[UIO_FASTIOV];
538941e2
PB
774};
775
776struct io_async_rw {
777 struct io_rw_state s;
778 const struct iovec *free_iovec;
227c0c96 779 size_t bytes_done;
bcf5a063 780 struct wait_page_queue wpq;
f67676d1
JA
781};
782
6b47ee6e
PB
783enum {
784 REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
785 REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
786 REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
787 REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
788 REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
bcda7baa 789 REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
04c76b41 790 REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,
6b47ee6e 791
dddca226 792 /* first byte is taken by user flags, shift it to not overlap */
93d2bcd2 793 REQ_F_FAIL_BIT = 8,
6b47ee6e
PB
794 REQ_F_INFLIGHT_BIT,
795 REQ_F_CUR_POS_BIT,
796 REQ_F_NOWAIT_BIT,
6b47ee6e 797 REQ_F_LINK_TIMEOUT_BIT,
99bc4c38 798 REQ_F_NEED_CLEANUP_BIT,
d7718a9d 799 REQ_F_POLLED_BIT,
bcda7baa 800 REQ_F_BUFFER_SELECTED_BIT,
e342c807 801 REQ_F_COMPLETE_INLINE_BIT,
230d50d4 802 REQ_F_REISSUE_BIT,
b8e64b53 803 REQ_F_CREDS_BIT,
20e60a38 804 REQ_F_REFCOUNT_BIT,
4d13d1a4 805 REQ_F_ARM_LTIMEOUT_BIT,
d886e185 806 REQ_F_ASYNC_DATA_BIT,
04c76b41 807 REQ_F_SKIP_LINK_CQES_BIT,
91eac1c6
JA
808 REQ_F_SINGLE_POLL_BIT,
809 REQ_F_DOUBLE_POLL_BIT,
8a3e8ee5 810 REQ_F_PARTIAL_IO_BIT,
7b29f92d 811 /* keep async read/write and isreg together and in order */
35645ac3 812 REQ_F_SUPPORT_NOWAIT_BIT,
7b29f92d 813 REQ_F_ISREG_BIT,
84557871
JA
814
815 /* not a real bit, just to check we're not overflowing the space */
816 __REQ_F_LAST_BIT,
6b47ee6e
PB
817};
818
819enum {
820 /* ctx owns file */
821 REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
822 /* drain existing IO first */
823 REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
824 /* linked sqes */
825 REQ_F_LINK = BIT(REQ_F_LINK_BIT),
826 /* doesn't sever on completion < 0 */
827 REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
828 /* IOSQE_ASYNC */
829 REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
bcda7baa
JA
830 /* IOSQE_BUFFER_SELECT */
831 REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
04c76b41
PB
832 /* IOSQE_CQE_SKIP_SUCCESS */
833 REQ_F_CQE_SKIP = BIT(REQ_F_CQE_SKIP_BIT),
6b47ee6e 834
6b47ee6e 835 /* fail rest of links */
93d2bcd2 836 REQ_F_FAIL = BIT(REQ_F_FAIL_BIT),
b05a1bcd 837 /* on inflight list, should be cancelled and waited on exit reliably */
6b47ee6e
PB
838 REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
839 /* read/write uses file position */
840 REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
841 /* must not punt to workers */
842 REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
900fad45 843 /* has or had linked timeout */
6b47ee6e 844 REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
99bc4c38
PB
845 /* needs cleanup */
846 REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
d7718a9d
JA
847 /* already went through poll handler */
848 REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
bcda7baa
JA
849 /* buffer already selected */
850 REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
e342c807
PB
851 /* completion is deferred through io_comp_state */
852 REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
230d50d4
JA
853 /* caller should reissue async */
854 REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
35645ac3
PB
855 /* supports async reads/writes */
856 REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT),
7b29f92d
JA
857 /* regular file */
858 REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
b8e64b53
PB
859 /* has creds assigned */
860 REQ_F_CREDS = BIT(REQ_F_CREDS_BIT),
20e60a38
PB
861 /* skip refcounting if not set */
862 REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT),
4d13d1a4
PB
863 /* there is a linked timeout that has to be armed */
864 REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT),
d886e185
PB
865 /* ->async_data allocated */
866 REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT),
04c76b41
PB
867 /* don't post CQEs while failing linked requests */
868 REQ_F_SKIP_LINK_CQES = BIT(REQ_F_SKIP_LINK_CQES_BIT),
91eac1c6
JA
869 /* single poll may be active */
870 REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT),
871 /* double poll may active */
872 REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT),
8a3e8ee5
JA
873 /* request has already done partial IO */
874 REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT),
d7718a9d
JA
875};
876
877struct async_poll {
878 struct io_poll_iocb poll;
807abcb0 879 struct io_poll_iocb *double_poll;
6b47ee6e
PB
880};
881
f237c30a 882typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
5b0a6acc 883
7cbf1722 884struct io_task_work {
5b0a6acc
PB
885 union {
886 struct io_wq_work_node node;
887 struct llist_node fallback_node;
888 };
889 io_req_tw_func_t func;
7cbf1722
JA
890};
891
992da01a
PB
892enum {
893 IORING_RSRC_FILE = 0,
894 IORING_RSRC_BUFFER = 1,
895};
896
cef216fc
PB
897struct io_cqe {
898 __u64 user_data;
899 __s32 res;
900 /* fd initially, then cflags for completion */
901 union {
902 __u32 flags;
903 int fd;
904 };
905};
906
10988a0a
DY
907enum {
908 IO_CHECK_CQ_OVERFLOW_BIT,
155bc950 909 IO_CHECK_CQ_DROPPED_BIT,
10988a0a
DY
910};
911
09bb8394
JA
912/*
913 * NOTE! Each of the iocb union members has the file pointer
914 * as the first entry in their struct definition. So you can
915 * access the file pointer through any of the sub-structs,
63c36549 916 * or directly as just 'file' in this struct.
09bb8394 917 */
2b188cc1 918struct io_kiocb {
221c5eb2 919 union {
09bb8394 920 struct file *file;
9adbd45d 921 struct io_rw rw;
221c5eb2 922 struct io_poll_iocb poll;
9d805892 923 struct io_poll_update poll_update;
8ed8d3c3
JA
924 struct io_accept accept;
925 struct io_sync sync;
fbf23849 926 struct io_cancel cancel;
b29472ee 927 struct io_timeout timeout;
0bdf7a2d 928 struct io_timeout_rem timeout_rem;
3fbb51c1 929 struct io_connect connect;
e47293fd 930 struct io_sr_msg sr_msg;
15b71abe 931 struct io_open open;
b5dba59e 932 struct io_close close;
269bbe5f 933 struct io_rsrc_update rsrc_update;
4840e418 934 struct io_fadvise fadvise;
c1ca757b 935 struct io_madvise madvise;
3e4827b0 936 struct io_epoll epoll;
7d67af2c 937 struct io_splice splice;
ddf0322d 938 struct io_provide_buf pbuf;
1d9e1288 939 struct io_statx statx;
36f4fa68 940 struct io_shutdown shutdown;
80a261fd 941 struct io_rename rename;
14a1143b 942 struct io_unlink unlink;
e34a02dc 943 struct io_mkdir mkdir;
7a8721f8 944 struct io_symlink symlink;
cf30da90 945 struct io_hardlink hardlink;
4f57f06c 946 struct io_msg msg;
221c5eb2 947 };
2b188cc1 948
d625c6ee 949 u8 opcode;
65a6543d
XW
950 /* polled IO has completed */
951 u8 iopoll_completed;
4f4eeba8 952 u16 buf_index;
d17e56eb
PB
953 unsigned int flags;
954
cef216fc 955 struct io_cqe cqe;
4f4eeba8 956
010e8e6b 957 struct io_ring_ctx *ctx;
010e8e6b 958 struct task_struct *task;
d7718a9d 959
c1bdf8ed 960 struct io_rsrc_node *rsrc_node;
d886e185
PB
961 /* store used ubuf, so we can prevent reloading */
962 struct io_mapped_ubuf *imu;
fcb323cc 963
2804ecd8
JA
964 union {
965 /* used by request caches, completion batching and iopoll */
966 struct io_wq_work_node comp_list;
967 /* cache ->apoll->events */
968 int apoll_events;
969 };
d17e56eb 970 atomic_t refs;
521d61fc 971 atomic_t poll_refs;
5b0a6acc 972 struct io_task_work io_task_work;
010e8e6b
PB
973 /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
974 struct hlist_node hash_node;
7e3709d5 975 /* internal polling, see IORING_FEAT_FAST_POLL */
010e8e6b 976 struct async_poll *apoll;
d886e185
PB
977 /* opcode allocated if it needs to store data for async defer */
978 void *async_data;
7e3709d5 979 /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
30d51dd4 980 struct io_buffer *kbuf;
41cdcc22 981 /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
34d2bfe7 982 struct io_kiocb *link;
41cdcc22 983 /* custom credentials, valid IFF REQ_F_CREDS is set */
521d61fc
JA
984 const struct cred *creds;
985 struct io_wq_work work;
2b188cc1 986};
05589553 987
13bf43f5
PB
988struct io_tctx_node {
989 struct list_head ctx_node;
990 struct task_struct *task;
13bf43f5
PB
991 struct io_ring_ctx *ctx;
992};
993
27dc8338
PB
994struct io_defer_entry {
995 struct list_head list;
996 struct io_kiocb *req;
9cf7c104 997 u32 seq;
2b188cc1
JA
998};
999
b21432b4
JA
1000struct io_cancel_data {
1001 struct io_ring_ctx *ctx;
4bf94615
JA
1002 union {
1003 u64 data;
1004 struct file *file;
1005 };
8e29da69
JA
1006 u32 flags;
1007 int seq;
b21432b4
JA
1008};
1009
d3656344 1010struct io_op_def {
d3656344
JA
1011 /* needs req->file assigned */
1012 unsigned needs_file : 1;
6d63416d
PB
1013 /* should block plug */
1014 unsigned plug : 1;
d3656344
JA
1015 /* hash wq insertion if file is a regular file */
1016 unsigned hash_reg_file : 1;
1017 /* unbound wq insertion if file is a non-regular file */
1018 unsigned unbound_nonreg_file : 1;
8a72758c
JA
1019 /* set if opcode supports polled "wait" */
1020 unsigned pollin : 1;
1021 unsigned pollout : 1;
52dd8640 1022 unsigned poll_exclusive : 1;
bcda7baa
JA
1023 /* op supports buffer selection */
1024 unsigned buffer_select : 1;
26f0505a
PB
1025 /* do prep async if is going to be punted */
1026 unsigned needs_async_setup : 1;
6d63416d
PB
1027 /* opcode is not supported by this kernel */
1028 unsigned not_supported : 1;
5bd2182d
PM
1029 /* skip auditing */
1030 unsigned audit_skip : 1;
e8c2bc1f
JA
1031 /* size of async data needed, if any */
1032 unsigned short async_size;
d3656344
JA
1033};
1034
0918682b 1035static const struct io_op_def io_op_defs[] = {
0463b6c5
PB
1036 [IORING_OP_NOP] = {},
1037 [IORING_OP_READV] = {
d3656344
JA
1038 .needs_file = 1,
1039 .unbound_nonreg_file = 1,
8a72758c 1040 .pollin = 1,
4d954c25 1041 .buffer_select = 1,
26f0505a 1042 .needs_async_setup = 1,
27926b68 1043 .plug = 1,
5bd2182d 1044 .audit_skip = 1,
e8c2bc1f 1045 .async_size = sizeof(struct io_async_rw),
d3656344 1046 },
0463b6c5 1047 [IORING_OP_WRITEV] = {
d3656344
JA
1048 .needs_file = 1,
1049 .hash_reg_file = 1,
1050 .unbound_nonreg_file = 1,
8a72758c 1051 .pollout = 1,
26f0505a 1052 .needs_async_setup = 1,
27926b68 1053 .plug = 1,
5bd2182d 1054 .audit_skip = 1,
e8c2bc1f 1055 .async_size = sizeof(struct io_async_rw),
d3656344 1056 },
0463b6c5 1057 [IORING_OP_FSYNC] = {
d3656344 1058 .needs_file = 1,
5bd2182d 1059 .audit_skip = 1,
d3656344 1060 },
0463b6c5 1061 [IORING_OP_READ_FIXED] = {
d3656344
JA
1062 .needs_file = 1,
1063 .unbound_nonreg_file = 1,
8a72758c 1064 .pollin = 1,
27926b68 1065 .plug = 1,
5bd2182d 1066 .audit_skip = 1,
e8c2bc1f 1067 .async_size = sizeof(struct io_async_rw),
d3656344 1068 },
0463b6c5 1069 [IORING_OP_WRITE_FIXED] = {
d3656344
JA
1070 .needs_file = 1,
1071 .hash_reg_file = 1,
1072 .unbound_nonreg_file = 1,
8a72758c 1073 .pollout = 1,
27926b68 1074 .plug = 1,
5bd2182d 1075 .audit_skip = 1,
e8c2bc1f 1076 .async_size = sizeof(struct io_async_rw),
d3656344 1077 },
0463b6c5 1078 [IORING_OP_POLL_ADD] = {
d3656344
JA
1079 .needs_file = 1,
1080 .unbound_nonreg_file = 1,
5bd2182d
PM
1081 .audit_skip = 1,
1082 },
1083 [IORING_OP_POLL_REMOVE] = {
1084 .audit_skip = 1,
d3656344 1085 },
0463b6c5 1086 [IORING_OP_SYNC_FILE_RANGE] = {
d3656344 1087 .needs_file = 1,
5bd2182d 1088 .audit_skip = 1,
d3656344 1089 },
0463b6c5 1090 [IORING_OP_SENDMSG] = {
d3656344
JA
1091 .needs_file = 1,
1092 .unbound_nonreg_file = 1,
8a72758c 1093 .pollout = 1,
26f0505a 1094 .needs_async_setup = 1,
e8c2bc1f 1095 .async_size = sizeof(struct io_async_msghdr),
d3656344 1096 },
0463b6c5 1097 [IORING_OP_RECVMSG] = {
d3656344
JA
1098 .needs_file = 1,
1099 .unbound_nonreg_file = 1,
8a72758c 1100 .pollin = 1,
52de1fe1 1101 .buffer_select = 1,
26f0505a 1102 .needs_async_setup = 1,
e8c2bc1f 1103 .async_size = sizeof(struct io_async_msghdr),
d3656344 1104 },
0463b6c5 1105 [IORING_OP_TIMEOUT] = {
5bd2182d 1106 .audit_skip = 1,
e8c2bc1f 1107 .async_size = sizeof(struct io_timeout_data),
d3656344 1108 },
9c8e11b3
PB
1109 [IORING_OP_TIMEOUT_REMOVE] = {
1110 /* used by timeout updates' prep() */
5bd2182d 1111 .audit_skip = 1,
9c8e11b3 1112 },
0463b6c5 1113 [IORING_OP_ACCEPT] = {
d3656344
JA
1114 .needs_file = 1,
1115 .unbound_nonreg_file = 1,
8a72758c 1116 .pollin = 1,
52dd8640 1117 .poll_exclusive = 1,
d3656344 1118 },
5bd2182d
PM
1119 [IORING_OP_ASYNC_CANCEL] = {
1120 .audit_skip = 1,
1121 },
0463b6c5 1122 [IORING_OP_LINK_TIMEOUT] = {
5bd2182d 1123 .audit_skip = 1,
e8c2bc1f 1124 .async_size = sizeof(struct io_timeout_data),
d3656344 1125 },
0463b6c5 1126 [IORING_OP_CONNECT] = {
d3656344
JA
1127 .needs_file = 1,
1128 .unbound_nonreg_file = 1,
8a72758c 1129 .pollout = 1,
26f0505a 1130 .needs_async_setup = 1,
e8c2bc1f 1131 .async_size = sizeof(struct io_async_connect),
d3656344 1132 },
0463b6c5 1133 [IORING_OP_FALLOCATE] = {
d3656344 1134 .needs_file = 1,
d3656344 1135 },
44526bed
JA
1136 [IORING_OP_OPENAT] = {},
1137 [IORING_OP_CLOSE] = {},
5bd2182d
PM
1138 [IORING_OP_FILES_UPDATE] = {
1139 .audit_skip = 1,
1140 },
1141 [IORING_OP_STATX] = {
1142 .audit_skip = 1,
1143 },
0463b6c5 1144 [IORING_OP_READ] = {
3a6820f2
JA
1145 .needs_file = 1,
1146 .unbound_nonreg_file = 1,
8a72758c 1147 .pollin = 1,
bcda7baa 1148 .buffer_select = 1,
27926b68 1149 .plug = 1,
5bd2182d 1150 .audit_skip = 1,
e8c2bc1f 1151 .async_size = sizeof(struct io_async_rw),
3a6820f2 1152 },
0463b6c5 1153 [IORING_OP_WRITE] = {
3a6820f2 1154 .needs_file = 1,
7b3188e7 1155 .hash_reg_file = 1,
3a6820f2 1156 .unbound_nonreg_file = 1,
8a72758c 1157 .pollout = 1,
27926b68 1158 .plug = 1,
5bd2182d 1159 .audit_skip = 1,
e8c2bc1f 1160 .async_size = sizeof(struct io_async_rw),
3a6820f2 1161 },
0463b6c5 1162 [IORING_OP_FADVISE] = {
4840e418 1163 .needs_file = 1,
5bd2182d 1164 .audit_skip = 1,
c1ca757b 1165 },
44526bed 1166 [IORING_OP_MADVISE] = {},
0463b6c5 1167 [IORING_OP_SEND] = {
fddaface
JA
1168 .needs_file = 1,
1169 .unbound_nonreg_file = 1,
8a72758c 1170 .pollout = 1,
5bd2182d 1171 .audit_skip = 1,
fddaface 1172 },
0463b6c5 1173 [IORING_OP_RECV] = {
fddaface
JA
1174 .needs_file = 1,
1175 .unbound_nonreg_file = 1,
8a72758c 1176 .pollin = 1,
bcda7baa 1177 .buffer_select = 1,
5bd2182d 1178 .audit_skip = 1,
fddaface 1179 },
0463b6c5 1180 [IORING_OP_OPENAT2] = {
cebdb986 1181 },
3e4827b0
JA
1182 [IORING_OP_EPOLL_CTL] = {
1183 .unbound_nonreg_file = 1,
5bd2182d 1184 .audit_skip = 1,
3e4827b0 1185 },
7d67af2c
PB
1186 [IORING_OP_SPLICE] = {
1187 .needs_file = 1,
1188 .hash_reg_file = 1,
1189 .unbound_nonreg_file = 1,
5bd2182d
PM
1190 .audit_skip = 1,
1191 },
1192 [IORING_OP_PROVIDE_BUFFERS] = {
1193 .audit_skip = 1,
1194 },
1195 [IORING_OP_REMOVE_BUFFERS] = {
1196 .audit_skip = 1,
ddf0322d 1197 },
f2a8d5c7
PB
1198 [IORING_OP_TEE] = {
1199 .needs_file = 1,
1200 .hash_reg_file = 1,
1201 .unbound_nonreg_file = 1,
5bd2182d 1202 .audit_skip = 1,
f2a8d5c7 1203 },
36f4fa68
JA
1204 [IORING_OP_SHUTDOWN] = {
1205 .needs_file = 1,
1206 },
44526bed
JA
1207 [IORING_OP_RENAMEAT] = {},
1208 [IORING_OP_UNLINKAT] = {},
e34a02dc 1209 [IORING_OP_MKDIRAT] = {},
7a8721f8 1210 [IORING_OP_SYMLINKAT] = {},
cf30da90 1211 [IORING_OP_LINKAT] = {},
4f57f06c
JA
1212 [IORING_OP_MSG_RING] = {
1213 .needs_file = 1,
1214 },
d3656344
JA
1215};
1216
0756a869
PB
1217/* requests with any of those set should undergo io_disarm_next() */
1218#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
da1a08c5 1219#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
0756a869 1220
7a612350 1221static bool io_disarm_next(struct io_kiocb *req);
eef51daa 1222static void io_uring_del_tctx_node(unsigned long index);
9936c7c2
PB
1223static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
1224 struct task_struct *task,
3dd0c97a 1225 bool cancel_all);
78cc687b 1226static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
1ffc5422 1227
4e118cd9 1228static void __io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags);
c7dae4ba 1229static void io_dismantle_req(struct io_kiocb *req);
94ae5e77 1230static void io_queue_linked_timeout(struct io_kiocb *req);
fdecb662 1231static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
c3bdad02 1232 struct io_uring_rsrc_update2 *up,
98f0b3b4 1233 unsigned nr_args);
68fb8979 1234static void io_clean_op(struct io_kiocb *req);
5106dd6e
JA
1235static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
1236 unsigned issue_flags);
1237static inline struct file *io_file_get_normal(struct io_kiocb *req, int fd);
d5361233
JA
1238static void io_drop_inflight_file(struct io_kiocb *req);
1239static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags);
cbc2e203 1240static void io_queue_sqe(struct io_kiocb *req);
269bbe5f 1241static void io_rsrc_put_work(struct work_struct *work);
de0617e4 1242
907d1df3 1243static void io_req_task_queue(struct io_kiocb *req);
c450178d 1244static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
179ae0d1 1245static int io_req_prep_async(struct io_kiocb *req);
de0617e4 1246
b9445598
PB
1247static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
1248 unsigned int issue_flags, u32 slot_index);
7df778be
PB
1249static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
1250
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
static void io_eventfd_signal(struct io_ring_ctx *ctx);
static void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

#if defined(CONFIG_UNIX)
static inline bool io_file_need_scm(struct file *filp)
{
#if defined(IO_URING_SCM_ALL)
	return true;
#else
	return !!unix_get_socket(filp);
#endif
}
#else
static inline bool io_file_need_scm(struct file *filp)
{
	return false;
}
#endif

static void io_ring_submit_unlock(struct io_ring_ctx *ctx, unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static void io_ring_submit_lock(struct io_ring_ctx *ctx, unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
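/*
 * Illustrative usage sketch (guarded out, not compiled): code that touches
 * ctx state protected by ->uring_lock brackets the access with this pair,
 * so the already-locked issue path pays nothing and only the unlocked
 * io-wq path takes the mutex.  touch_locked_state() is a hypothetical
 * stand-in for such an access.
 */
#if 0
static void example_locked_access(struct io_ring_ctx *ctx,
				  unsigned int issue_flags)
{
	io_ring_submit_lock(ctx, issue_flags);
	touch_locked_state(ctx);	/* hypothetical work under ->uring_lock */
	io_ring_submit_unlock(ctx, issue_flags);
}
#endif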

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

/*
 * Shamelessly stolen from the mm implementation of page reference checking,
 * see commit f958d7b528b1 for details.
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
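/*
 * A worked illustration (guarded out, not compiled) of the wrap-around
 * check above: adding 127u overflows for values in the top 127 of the
 * unsigned range, so the test is true for a refcount of zero and for one
 * that has underflowed past zero, while ordinary counts stay false.
 */
#if 0
static bool close_to_overflow(unsigned int refs)
{
	return refs + 127u <= 127u;	/* same arithmetic as the macro */
}
/*
 * close_to_overflow(0)           -> true  (refcount dropped to zero)
 * close_to_overflow(1)           -> false (normal count)
 * close_to_overflow(4294967295u) -> true  (UINT_MAX, i.e. underflowed -1)
 * close_to_overflow(4294967169u) -> true  (UINT_MAX - 126, wraps to 0)
 * close_to_overflow(4294967168u) -> false (just outside the window)
 */
#endif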

static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
{
	WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
	return atomic_inc_not_zero(&req->refs);
}
1331
21c843d5
PB
1332static inline bool req_ref_put_and_test(struct io_kiocb *req)
1333{
20e60a38
PB
1334 if (likely(!(req->flags & REQ_F_REFCOUNT)))
1335 return true;
1336
21c843d5
PB
1337 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1338 return atomic_dec_and_test(&req->refs);
1339}
1340
21c843d5
PB
1341static inline void req_ref_get(struct io_kiocb *req)
1342{
20e60a38 1343 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
21c843d5
PB
1344 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1345 atomic_inc(&req->refs);
1346}
1347
c450178d
PB
1348static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
1349{
6f33b0bc 1350 if (!wq_list_empty(&ctx->submit_state.compl_reqs))
c450178d
PB
1351 __io_submit_flush_completions(ctx);
1352}
1353
48dcd38d 1354static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
20e60a38
PB
1355{
1356 if (!(req->flags & REQ_F_REFCOUNT)) {
1357 req->flags |= REQ_F_REFCOUNT;
48dcd38d 1358 atomic_set(&req->refs, nr);
20e60a38
PB
1359 }
1360}
1361
48dcd38d
PB
1362static inline void io_req_set_refcount(struct io_kiocb *req)
1363{
1364 __io_req_set_refcount(req, 1);
1365}
1366
ab409402
PB
1367#define IO_RSRC_REF_BATCH 100
1368
25a15d3c
PB
1369static void io_rsrc_put_node(struct io_rsrc_node *node, int nr)
1370{
1371 percpu_ref_put_many(&node->refs, nr);
1372}
1373
ab409402
PB
1374static inline void io_req_put_rsrc_locked(struct io_kiocb *req,
1375 struct io_ring_ctx *ctx)
1376 __must_hold(&ctx->uring_lock)
36f72fe2 1377{
c1bdf8ed 1378 struct io_rsrc_node *node = req->rsrc_node;
ab409402 1379
c1bdf8ed
PB
1380 if (node) {
1381 if (node == ctx->rsrc_node)
ab409402
PB
1382 ctx->rsrc_cached_refs++;
1383 else
25a15d3c 1384 io_rsrc_put_node(node, 1);
ab409402
PB
1385 }
1386}
1387
7ac1edc4 1388static inline void io_req_put_rsrc(struct io_kiocb *req)
ab409402 1389{
c1bdf8ed 1390 if (req->rsrc_node)
25a15d3c 1391 io_rsrc_put_node(req->rsrc_node, 1);
ab409402
PB
1392}
1393
1394static __cold void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
1395 __must_hold(&ctx->uring_lock)
1396{
1397 if (ctx->rsrc_cached_refs) {
25a15d3c 1398 io_rsrc_put_node(ctx->rsrc_node, ctx->rsrc_cached_refs);
ab409402
PB
1399 ctx->rsrc_cached_refs = 0;
1400 }
1401}
1402
1403static void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
1404 __must_hold(&ctx->uring_lock)
1405{
1406 ctx->rsrc_cached_refs += IO_RSRC_REF_BATCH;
1407 percpu_ref_get_many(&ctx->rsrc_node->refs, IO_RSRC_REF_BATCH);
1408}
36f72fe2 1409
a46be971 1410static inline void io_req_set_rsrc_node(struct io_kiocb *req,
5106dd6e
JA
1411 struct io_ring_ctx *ctx,
1412 unsigned int issue_flags)
36f72fe2 1413{
c1bdf8ed
PB
1414 if (!req->rsrc_node) {
1415 req->rsrc_node = ctx->rsrc_node;
5106dd6e
JA
1416
1417 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1418 lockdep_assert_held(&ctx->uring_lock);
1419 ctx->rsrc_cached_refs--;
1420 if (unlikely(ctx->rsrc_cached_refs < 0))
1421 io_rsrc_refs_refill(ctx);
1422 } else {
c1bdf8ed 1423 percpu_ref_get(&req->rsrc_node->refs);
5106dd6e 1424 }
36f72fe2
PB
1425 }
1426}

static unsigned int __io_put_kbuf(struct io_kiocb *req, struct list_head *list)
{
	struct io_buffer *kbuf = req->kbuf;
	unsigned int cflags;

	cflags = IORING_CQE_F_BUFFER | (kbuf->bid << IORING_CQE_BUFFER_SHIFT);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	list_add(&kbuf->list, list);
	req->kbuf = NULL;
	return cflags;
}

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
		return 0;
	return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	unsigned int cflags;

	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
		return 0;

	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache);
	}

	return cflags;
}
1483
dbc7d452
JA
1484static struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
1485 unsigned int bgid)
1486{
1487 struct list_head *hash_list;
1488 struct io_buffer_list *bl;
1489
1490 hash_list = &ctx->io_buffers[hash_32(bgid, IO_BUFFERS_HASH_BITS)];
1491 list_for_each_entry(bl, hash_list, list)
1492 if (bl->bgid == bgid || bgid == -1U)
1493 return bl;
1494
1495 return NULL;
1496}
1497
4d55f238 1498static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
b1c62645
JA
1499{
1500 struct io_ring_ctx *ctx = req->ctx;
dbc7d452
JA
1501 struct io_buffer_list *bl;
1502 struct io_buffer *buf;
b1c62645
JA
1503
1504 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
1505 return;
8a3e8ee5
JA
1506 /* don't recycle if we already did IO to this buffer */
1507 if (req->flags & REQ_F_PARTIAL_IO)
1508 return;
b1c62645 1509
f8929630 1510 io_ring_submit_lock(ctx, issue_flags);
b1c62645
JA
1511
1512 buf = req->kbuf;
dbc7d452
JA
1513 bl = io_buffer_get_list(ctx, buf->bgid);
1514 list_add(&buf->list, &bl->buf_list);
b1c62645
JA
1515 req->flags &= ~REQ_F_BUFFER_SELECTED;
1516 req->kbuf = NULL;
4d55f238 1517
f8929630 1518 io_ring_submit_unlock(ctx, issue_flags);
b1c62645
JA
1519}
1520
3dd0c97a
PB
1521static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
1522 bool cancel_all)
6af3f48b 1523 __must_hold(&req->ctx->timeout_lock)
08d23634 1524{
68207680 1525 if (task && head->task != task)
08d23634 1526 return false;
d5361233 1527 return cancel_all;
6af3f48b
PB
1528}
1529
1530/*
1531 * As io_match_task() but protected against racing with linked timeouts.
1532 * User must not hold timeout_lock.
1533 */
1534static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
1535 bool cancel_all)
1536{
6af3f48b
PB
1537 if (task && head->task != task)
1538 return false;
d5361233 1539 return cancel_all;
6af3f48b
PB
1540}
1541
d886e185
PB
1542static inline bool req_has_async_data(struct io_kiocb *req)
1543{
1544 return req->flags & REQ_F_ASYNC_DATA;
1545}
1546
93d2bcd2 1547static inline void req_set_fail(struct io_kiocb *req)
c40f6379 1548{
93d2bcd2 1549 req->flags |= REQ_F_FAIL;
04c76b41
PB
1550 if (req->flags & REQ_F_CQE_SKIP) {
1551 req->flags &= ~REQ_F_CQE_SKIP;
1552 req->flags |= REQ_F_SKIP_LINK_CQES;
1553 }
c40f6379 1554}
4a38aed2 1555
a8295b98
HX
1556static inline void req_fail_link_node(struct io_kiocb *req, int res)
1557{
1558 req_set_fail(req);
cef216fc 1559 req->cqe.res = res;
a8295b98
HX
1560}
1561
fa05457a
PB
1562static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
1563{
1564 wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
1565}
1566
c072481d 1567static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
2b188cc1
JA
1568{
1569 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1570
0f158b4c 1571 complete(&ctx->ref_comp);
2b188cc1
JA
1572}
1573
8eb7e2d0
PB
1574static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1575{
1576 return !req->timeout.off;
1577}
1578
c072481d 1579static __cold void io_fallback_req_func(struct work_struct *work)
f56165e6
PB
1580{
1581 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
1582 fallback_work.work);
1583 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
1584 struct io_kiocb *req, *tmp;
f237c30a 1585 bool locked = false;
f56165e6
PB
1586
1587 percpu_ref_get(&ctx->refs);
1588 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
f237c30a 1589 req->io_task_work.func(req, &locked);
5636c00d 1590
f237c30a 1591 if (locked) {
c450178d 1592 io_submit_flush_completions(ctx);
f237c30a
PB
1593 mutex_unlock(&ctx->uring_lock);
1594 }
f56165e6
PB
1595 percpu_ref_put(&ctx->refs);
1596}
1597
c072481d 1598static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
2b188cc1
JA
1599{
1600 struct io_ring_ctx *ctx;
dbc7d452 1601 int i, hash_bits;
2b188cc1
JA
1602
1603 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1604 if (!ctx)
1605 return NULL;
1606
78076bb6
JA
1607 /*
1608 * Use 5 bits less than the max cq entries, that should give us around
1609 * 32 entries per hash list if totally full and uniformly spread.
1610 */
1611 hash_bits = ilog2(p->cq_entries);
1612 hash_bits -= 5;
1613 if (hash_bits <= 0)
1614 hash_bits = 1;
1615 ctx->cancel_hash_bits = hash_bits;
1616 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1617 GFP_KERNEL);
1618 if (!ctx->cancel_hash)
1619 goto err;
1620 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1621
6224843d
PB
1622 ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
1623 if (!ctx->dummy_ubuf)
1624 goto err;
1625 /* set invalid range, so io_import_fixed() fails meeting it */
1626 ctx->dummy_ubuf->ubuf = -1UL;
1627
dbc7d452
JA
1628 ctx->io_buffers = kcalloc(1U << IO_BUFFERS_HASH_BITS,
1629 sizeof(struct list_head), GFP_KERNEL);
1630 if (!ctx->io_buffers)
1631 goto err;
1632 for (i = 0; i < (1U << IO_BUFFERS_HASH_BITS); i++)
1633 INIT_LIST_HEAD(&ctx->io_buffers[i]);
1634
21482896 1635 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
206aefde
JA
1636 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1637 goto err;
2b188cc1
JA
1638
1639 ctx->flags = p->flags;
90554200 1640 init_waitqueue_head(&ctx->sqo_sq_wait);
69fb2131 1641 INIT_LIST_HEAD(&ctx->sqd_list);
1d7bb1d5 1642 INIT_LIST_HEAD(&ctx->cq_overflow_list);
cc3cec83 1643 INIT_LIST_HEAD(&ctx->io_buffers_cache);
4d9237e3 1644 INIT_LIST_HEAD(&ctx->apoll_cache);
0f158b4c 1645 init_completion(&ctx->ref_comp);
61cf9370 1646 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
2b188cc1 1647 mutex_init(&ctx->uring_lock);
311997b3 1648 init_waitqueue_head(&ctx->cq_wait);
2b188cc1 1649 spin_lock_init(&ctx->completion_lock);
89850fce 1650 spin_lock_init(&ctx->timeout_lock);
5eef4e87 1651 INIT_WQ_LIST(&ctx->iopoll_list);
cc3cec83
JA
1652 INIT_LIST_HEAD(&ctx->io_buffers_pages);
1653 INIT_LIST_HEAD(&ctx->io_buffers_comp);
de0617e4 1654 INIT_LIST_HEAD(&ctx->defer_list);
5262f567 1655 INIT_LIST_HEAD(&ctx->timeout_list);
ef9dd637 1656 INIT_LIST_HEAD(&ctx->ltimeout_list);
d67d2263
BM
1657 spin_lock_init(&ctx->rsrc_ref_lock);
1658 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
269bbe5f
BM
1659 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1660 init_llist_head(&ctx->rsrc_put_llist);
13bf43f5 1661 INIT_LIST_HEAD(&ctx->tctx_list);
c2b6c6bc
PB
1662 ctx->submit_state.free_list.next = NULL;
1663 INIT_WQ_LIST(&ctx->locked_free_list);
9011bf9a 1664 INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
6f33b0bc 1665 INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
2b188cc1 1666 return ctx;
206aefde 1667err:
6224843d 1668 kfree(ctx->dummy_ubuf);
78076bb6 1669 kfree(ctx->cancel_hash);
dbc7d452 1670 kfree(ctx->io_buffers);
206aefde
JA
1671 kfree(ctx);
1672 return NULL;
2b188cc1
JA
1673}
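/*
 * A worked example of the cancel_hash sizing heuristic in
 * io_ring_ctx_alloc() above: with cq_entries == 4096, ilog2(4096) == 12,
 * minus 5 gives hash_bits == 7, i.e. 128 hash buckets; a completely full
 * CQ spread uniformly then averages 4096 / 128 == 32 requests per bucket,
 * matching the "around 32 entries per hash list" comment.
 */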
1674
8f6ed49a
PB
1675static void io_account_cq_overflow(struct io_ring_ctx *ctx)
1676{
1677 struct io_rings *r = ctx->rings;
1678
1679 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
1680 ctx->cq_extra--;
1681}
1682
9cf7c104 1683static bool req_need_defer(struct io_kiocb *req, u32 seq)
7adf4eaf 1684{
2bc9930e
JA
1685 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1686 struct io_ring_ctx *ctx = req->ctx;
a197f664 1687
8f6ed49a 1688 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
2bc9930e 1689 }
de0617e4 1690
9d858b21 1691 return false;
de0617e4
JA
1692}
1693
c97d8a0f
PB
1694static inline bool io_req_ffs_set(struct io_kiocb *req)
1695{
35645ac3 1696 return req->flags & REQ_F_FIXED_FILE;
c97d8a0f
PB
1697}
1698
fd08e530
PB
1699static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
1700{
906c6caa
PB
1701 if (WARN_ON_ONCE(!req->link))
1702 return NULL;
1703
4d13d1a4
PB
1704 req->flags &= ~REQ_F_ARM_LTIMEOUT;
1705 req->flags |= REQ_F_LINK_TIMEOUT;
fd08e530
PB
1706
1707 /* linked timeouts should have two refs once prep'ed */
48dcd38d 1708 io_req_set_refcount(req);
4d13d1a4
PB
1709 __io_req_set_refcount(req->link, 2);
1710 return req->link;
fd08e530
PB
1711}
1712
1713static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
1714{
4d13d1a4 1715 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
fd08e530
PB
1716 return NULL;
1717 return __io_prep_linked_timeout(req);
1718}
1719
cb2d344c
PB
1720static noinline void __io_arm_ltimeout(struct io_kiocb *req)
1721{
1722 io_queue_linked_timeout(__io_prep_linked_timeout(req));
1723}
1724
1725static inline void io_arm_ltimeout(struct io_kiocb *req)
1726{
1727 if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
1728 __io_arm_ltimeout(req);
1729}
1730
1e6fa521
JA
1731static void io_prep_async_work(struct io_kiocb *req)
1732{
1733 const struct io_op_def *def = &io_op_defs[req->opcode];
1e6fa521
JA
1734 struct io_ring_ctx *ctx = req->ctx;
1735
b8e64b53
PB
1736 if (!(req->flags & REQ_F_CREDS)) {
1737 req->flags |= REQ_F_CREDS;
c10d1f98 1738 req->creds = get_current_cred();
b8e64b53 1739 }
003e8dcc 1740
e1d675df
PB
1741 req->work.list.next = NULL;
1742 req->work.flags = 0;
8e29da69 1743 req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
feaadc4f
PB
1744 if (req->flags & REQ_F_FORCE_ASYNC)
1745 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1746
1e6fa521
JA
1747 if (req->flags & REQ_F_ISREG) {
1748 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
1749 io_wq_hash_work(&req->work, file_inode(req->file));
4b982bd0 1750 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
1e6fa521
JA
1751 if (def->unbound_nonreg_file)
1752 req->work.flags |= IO_WQ_WORK_UNBOUND;
1753 }
561fb04a 1754}
cccf0ee8 1755
cbdcb435 1756static void io_prep_async_link(struct io_kiocb *req)
561fb04a 1757{
cbdcb435 1758 struct io_kiocb *cur;
54a91f3b 1759
44eff40a
PB
1760 if (req->flags & REQ_F_LINK_TIMEOUT) {
1761 struct io_ring_ctx *ctx = req->ctx;
1762
674ee8e1 1763 spin_lock_irq(&ctx->timeout_lock);
44eff40a
PB
1764 io_for_each_link(cur, req)
1765 io_prep_async_work(cur);
674ee8e1 1766 spin_unlock_irq(&ctx->timeout_lock);
44eff40a
PB
1767 } else {
1768 io_for_each_link(cur, req)
1769 io_prep_async_work(cur);
1770 }
561fb04a
JA
1771}
1772
fff4e40e
PB
1773static inline void io_req_add_compl_list(struct io_kiocb *req)
1774{
775a1f2f 1775 struct io_submit_state *state = &req->ctx->submit_state;
fff4e40e 1776
3d4aeb9f 1777 if (!(req->flags & REQ_F_CQE_SKIP))
775a1f2f 1778 state->flush_cqes = true;
fff4e40e
PB
1779 wq_list_add_tail(&req->comp_list, &state->compl_reqs);
1780}
1781
77955efb 1782static void io_queue_iowq(struct io_kiocb *req, bool *dont_use)
561fb04a 1783{
cbdcb435 1784 struct io_kiocb *link = io_prep_linked_timeout(req);
5aa75ed5 1785 struct io_uring_task *tctx = req->task->io_uring;
561fb04a 1786
3bfe6106
JA
1787 BUG_ON(!tctx);
1788 BUG_ON(!tctx->io_wq);
561fb04a 1789
cbdcb435
PB
1790 /* init ->work of the whole link before punting */
1791 io_prep_async_link(req);
991468dc
JA
1792
1793 /*
1794 * Not expected to happen, but if we do have a bug where this _can_
1795 * happen, catch it here and ensure the request is marked as
1796 * canceled. That will make io-wq go through the usual work cancel
1797 * procedure rather than attempt to run this request (or create a new
1798 * worker for it).
1799 */
1800 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
1801 req->work.flags |= IO_WQ_WORK_CANCEL;
1802
971cf9c1
PB
1803 trace_io_uring_queue_async_work(req->ctx, req, req->cqe.user_data,
1804 req->opcode, req->flags, &req->work,
1805 io_wq_is_hashed(&req->work));
ebf93667 1806 io_wq_enqueue(tctx->io_wq, &req->work);
7271ef3a
JA
1807 if (link)
1808 io_queue_linked_timeout(link);
cbdcb435
PB
1809}
1810
1ee4160c 1811static void io_kill_timeout(struct io_kiocb *req, int status)
8c855885 1812 __must_hold(&req->ctx->completion_lock)
89850fce 1813 __must_hold(&req->ctx->timeout_lock)
5262f567 1814{
e8c2bc1f 1815 struct io_timeout_data *io = req->async_data;
5262f567 1816
fd9c7bc5 1817 if (hrtimer_try_to_cancel(&io->timer) != -1) {
2ae2eb9d
PB
1818 if (status)
1819 req_set_fail(req);
01cec8c1
PB
1820 atomic_set(&req->ctx->cq_timeouts,
1821 atomic_read(&req->ctx->cq_timeouts) + 1);
135fcde8 1822 list_del_init(&req->timeout.list);
4e118cd9 1823 io_req_tw_post_queue(req, status, 0);
5262f567
JA
1824 }
1825}
1826
c072481d 1827static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
de0617e4 1828{
441b8a78 1829 while (!list_empty(&ctx->defer_list)) {
27dc8338
PB
1830 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1831 struct io_defer_entry, list);
de0617e4 1832
9cf7c104 1833 if (req_need_defer(de->req, de->seq))
04518945 1834 break;
27dc8338 1835 list_del_init(&de->list);
907d1df3 1836 io_req_task_queue(de->req);
27dc8338 1837 kfree(de);
441b8a78 1838 }
04518945
PB
1839}
1840
c072481d 1841static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
89850fce 1842 __must_hold(&ctx->completion_lock)
de0617e4 1843{
441b8a78 1844 u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
e677edbc 1845 struct io_kiocb *req, *tmp;
f010505b 1846
79ebeaee 1847 spin_lock_irq(&ctx->timeout_lock);
e677edbc 1848 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
f010505b 1849 u32 events_needed, events_got;
de0617e4 1850
8eb7e2d0 1851 if (io_is_timeout_noseq(req))
360428f8 1852 break;
f010505b
MDG
1853
1854 /*
1855 * Since seq can easily wrap around over time, subtract
1856 * the last seq at which timeouts were flushed before comparing.
1857 * Assuming not more than 2^31-1 events have happened since,
1858 * these subtractions won't have wrapped, so we can check if
1859 * target is in [last_seq, current_seq] by comparing the two.
1860 */
1861 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1862 events_got = seq - ctx->cq_last_tm_flush;
1863 if (events_got < events_needed)
360428f8 1864 break;
bfe68a22 1865
1ee4160c 1866 io_kill_timeout(req, 0);
f18ee4cf 1867 }
f010505b 1868 ctx->cq_last_tm_flush = seq;
79ebeaee 1869 spin_unlock_irq(&ctx->timeout_lock);
360428f8 1870}
5262f567 1871
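/*
 * Illustrative sketch, not part of io_uring.c: the wrap-safe sequence
 * comparison described in the comment above, assuming (as that comment
 * does) that fewer than 2^31-1 events have happened since the last
 * flush. All names below are invented for the example.
 */
#include <stdbool.h>
#include <stdint.h>

static bool example_timeout_due(uint32_t target_seq, uint32_t last_flush_seq,
				uint32_t current_seq)
{
	uint32_t events_needed = target_seq - last_flush_seq;
	uint32_t events_got = current_seq - last_flush_seq;

	/* due when target lies in (last_flush_seq, current_seq] */
	return events_got >= events_needed;
}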
9333f6b4
PB
1872static inline void io_commit_cqring(struct io_ring_ctx *ctx)
1873{
1874 /* order cqe stores with ring update */
1875 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
1876}
1877
9aa8dfde 1878static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
360428f8 1879{
9aa8dfde
PB
1880 if (ctx->off_timeout_used || ctx->drain_active) {
1881 spin_lock(&ctx->completion_lock);
1882 if (ctx->off_timeout_used)
1883 io_flush_timeouts(ctx);
1884 if (ctx->drain_active)
1885 io_queue_deferred(ctx);
1886 io_commit_cqring(ctx);
1887 spin_unlock(&ctx->completion_lock);
1888 }
1889 if (ctx->has_evfd)
1890 io_eventfd_signal(ctx);
de0617e4
JA
1891}
1892
90554200
JA
1893static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1894{
1895 struct io_rings *r = ctx->rings;
1896
a566c556 1897 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
90554200
JA
1898}
1899
888aae2e
PB
1900static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1901{
1902 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1903}
1904
d8da428b
PB
1905/*
1906 * writes to the cq entry need to come after reading head; the
1907 * control dependency is enough as we're using WRITE_ONCE to
1908 * fill the cq entry
1909 */
1910static noinline struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
2b188cc1 1911{
75b28aff 1912 struct io_rings *rings = ctx->rings;
d8da428b
PB
1913 unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
1914 unsigned int free, queued, len;
1915
1916 /* userspace may cheat by modifying the tail, be safe and do min */
1917 queued = min(__io_cqring_events(ctx), ctx->cq_entries);
1918 free = ctx->cq_entries - queued;
1919 /* we need a contiguous range, limit based on the current array offset */
1920 len = min(free, ctx->cq_entries - off);
1921 if (!len)
2b188cc1
JA
1922 return NULL;
1923
d8da428b
PB
1924 ctx->cached_cq_tail++;
1925 ctx->cqe_cached = &rings->cqes[off];
1926 ctx->cqe_sentinel = ctx->cqe_cached + len;
1927 return ctx->cqe_cached++;
1928}
1929
1930static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
1931{
1932 if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
1933 ctx->cached_cq_tail++;
1934 return ctx->cqe_cached++;
1935 }
1936 return __io_get_cqe(ctx);
2b188cc1
JA
1937}
1938
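/*
 * Illustrative sketch, not part of io_uring.c: the ring-index math behind
 * __io_get_cqe()'s caching above. It computes how many CQ slots can be
 * handed out back to back before either running out of free entries or
 * wrapping the array, i.e. the range cqe_cached..cqe_sentinel covers.
 * The helper and its names are invented for the example.
 */
static unsigned int example_contig_cq_slots(unsigned int cached_tail,
					    unsigned int head,
					    unsigned int cq_entries)
{
	unsigned int off = cached_tail & (cq_entries - 1);	/* cq_entries is a power of two */
	unsigned int queued = cached_tail - head;
	unsigned int free, until_wrap;

	if (queued > cq_entries)	/* be safe if head looks bogus */
		queued = cq_entries;
	free = cq_entries - queued;
	until_wrap = cq_entries - off;

	return free < until_wrap ? free : until_wrap;
}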
77bc59b4 1939static void io_eventfd_signal(struct io_ring_ctx *ctx)
f2842ab5 1940{
77bc59b4
UA
1941 struct io_ev_fd *ev_fd;
1942
77bc59b4
UA
1943 rcu_read_lock();
1944 /*
1945 * rcu_dereference ctx->io_ev_fd once and use it both for checking
1946 * and eventfd_signal
1947 */
1948 ev_fd = rcu_dereference(ctx->io_ev_fd);
1949
1950 /*
1951 * Check again if ev_fd exists in case an io_eventfd_unregister call
1952 * completed between the NULL check of ctx->io_ev_fd at the start of
1953 * the function and rcu_read_lock.
1954 */
1955 if (unlikely(!ev_fd))
1956 goto out;
7e55a19c 1957 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
77bc59b4
UA
1958 goto out;
1959
c75312dd 1960 if (!ev_fd->eventfd_async || io_wq_current_is_worker())
77bc59b4 1961 eventfd_signal(ev_fd->cq_ev_fd, 1);
77bc59b4
UA
1962out:
1963 rcu_read_unlock();
f2842ab5
JA
1964}
1965
9aa8dfde
PB
1966static inline void io_cqring_wake(struct io_ring_ctx *ctx)
1967{
1968 /*
1969 * wake_up_all() may seem excessive, but io_wake_function() and
1970 * io_should_wake() handle the termination of the loop and only
1971 * wake as many waiters as we need to.
1972 */
1973 if (wq_has_sleeper(&ctx->cq_wait))
1974 wake_up_all(&ctx->cq_wait);
1975}
1976
2c5d763c
JA
1977/*
1978 * This should only get called when at least one event has been posted.
1979 * Some applications rely on the eventfd notification count only changing
1980 * IFF a new CQE has been added to the CQ ring. There's no dependency on a
1981 * 1:1 relationship between how many times this function is called (and
1982 * hence the eventfd count) and the number of CQEs posted to the CQ ring.
1983 */
66fc25ca 1984static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
1d7bb1d5 1985{
9aa8dfde
PB
1986 if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
1987 ctx->has_evfd))
9333f6b4
PB
1988 __io_commit_cqring_flush(ctx);
1989
9aa8dfde 1990 io_cqring_wake(ctx);
1d7bb1d5
JA
1991}
1992
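/*
 * Illustrative sketch, not part of io_uring.c: a userspace consumer of an
 * eventfd registered with the ring. Per the comment above, it relies only
 * on "the count changed => at least one new CQE", not on a 1:1 mapping
 * between the count and CQEs. reap_all_cqes() is a stand-in for the
 * application's own CQ-draining loop.
 */
#include <stdint.h>
#include <unistd.h>

static void example_wait_and_reap(int event_fd, void (*reap_all_cqes)(void))
{
	uint64_t cnt;

	/* blocks until the kernel has signalled the eventfd at least once */
	if (read(event_fd, &cnt, sizeof(cnt)) == (ssize_t)sizeof(cnt))
		reap_all_cqes();	/* drain everything; 'cnt' is not a CQE count */
}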
80c18e4a
PB
1993static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1994{
9aa8dfde
PB
1995 if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
1996 ctx->has_evfd))
9333f6b4
PB
1997 __io_commit_cqring_flush(ctx);
1998
9aa8dfde
PB
1999 if (ctx->flags & IORING_SETUP_SQPOLL)
2000 io_cqring_wake(ctx);
80c18e4a
PB
2001}
2002
c4a2ed72 2003/* Returns true if there are no backlogged entries after the flush */
6c2450ae 2004static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
1d7bb1d5 2005{
b18032bb 2006 bool all_flushed, posted;
1d7bb1d5 2007
a566c556 2008 if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
e23de15f 2009 return false;
1d7bb1d5 2010
b18032bb 2011 posted = false;
79ebeaee 2012 spin_lock(&ctx->completion_lock);
6c2450ae 2013 while (!list_empty(&ctx->cq_overflow_list)) {
d068b506 2014 struct io_uring_cqe *cqe = io_get_cqe(ctx);
6c2450ae 2015 struct io_overflow_cqe *ocqe;
e6c8aa9a 2016
1d7bb1d5
JA
2017 if (!cqe && !force)
2018 break;
6c2450ae
PB
2019 ocqe = list_first_entry(&ctx->cq_overflow_list,
2020 struct io_overflow_cqe, list);
2021 if (cqe)
2022 memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
2023 else
8f6ed49a
PB
2024 io_account_cq_overflow(ctx);
2025
b18032bb 2026 posted = true;
6c2450ae
PB
2027 list_del(&ocqe->list);
2028 kfree(ocqe);
1d7bb1d5
JA
2029 }
2030
09e88404
PB
2031 all_flushed = list_empty(&ctx->cq_overflow_list);
2032 if (all_flushed) {
10988a0a 2033 clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
3a4b89a2 2034 atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
09e88404 2035 }
46930143 2036
60053be8 2037 io_commit_cqring(ctx);
79ebeaee 2038 spin_unlock(&ctx->completion_lock);
b18032bb
JA
2039 if (posted)
2040 io_cqring_ev_posted(ctx);
09e88404 2041 return all_flushed;
1d7bb1d5
JA
2042}
2043
90f67366 2044static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
6c503150 2045{
ca0a2651
JA
2046 bool ret = true;
2047
10988a0a 2048 if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
6c503150
PB
2049 /* iopoll syncs against uring_lock, not completion_lock */
2050 if (ctx->flags & IORING_SETUP_IOPOLL)
2051 mutex_lock(&ctx->uring_lock);
90f67366 2052 ret = __io_cqring_overflow_flush(ctx, false);
6c503150
PB
2053 if (ctx->flags & IORING_SETUP_IOPOLL)
2054 mutex_unlock(&ctx->uring_lock);
2055 }
ca0a2651
JA
2056
2057 return ret;
6c503150
PB
2058}
2059
9d170164 2060static void __io_put_task(struct task_struct *task, int nr)
6a290a14
PB
2061{
2062 struct io_uring_task *tctx = task->io_uring;
2063
9d170164
PB
2064 percpu_counter_sub(&tctx->inflight, nr);
2065 if (unlikely(atomic_read(&tctx->in_idle)))
2066 wake_up(&tctx->wait);
2067 put_task_struct_many(task, nr);
2068}
2069
2070/* must be called somewhat shortly after putting a request */
2071static inline void io_put_task(struct task_struct *task, int nr)
2072{
2073 if (likely(task == current))
2074 task->io_uring->cached_refs += nr;
2075 else
2076 __io_put_task(task, nr);
6a290a14
PB
2077}
2078
9a10867a
PB
2079static void io_task_refs_refill(struct io_uring_task *tctx)
2080{
2081 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
2082
2083 percpu_counter_add(&tctx->inflight, refill);
2084 refcount_add(refill, &current->usage);
2085 tctx->cached_refs += refill;
2086}
2087
2088static inline void io_get_task_refs(int nr)
2089{
2090 struct io_uring_task *tctx = current->io_uring;
2091
2092 tctx->cached_refs -= nr;
2093 if (unlikely(tctx->cached_refs < 0))
2094 io_task_refs_refill(tctx);
2095}
2096
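/*
 * Illustrative sketch, not part of io_uring.c: the batched reference
 * caching done by io_get_task_refs()/io_task_refs_refill() above. A
 * local signed cache absorbs most gets; the shared counter is touched
 * only when the cache would go negative. The names and the plain long
 * standing in for the percpu counter are invented for the example.
 */
#define EXAMPLE_REFS_CACHE_NR	(1 << 10)

struct example_tctx {
	long shared_inflight;	/* stands in for the shared inflight counter */
	int cached_refs;
};

static void example_get_task_refs(struct example_tctx *tctx, int nr)
{
	tctx->cached_refs -= nr;
	if (tctx->cached_refs < 0) {
		int refill = EXAMPLE_REFS_CACHE_NR - tctx->cached_refs;

		tctx->shared_inflight += refill;	/* one shared update per refill */
		tctx->cached_refs += refill;		/* back to EXAMPLE_REFS_CACHE_NR */
	}
}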
3cc7fdb9
PB
2097static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
2098{
2099 struct io_uring_task *tctx = task->io_uring;
2100 unsigned int refs = tctx->cached_refs;
2101
2102 if (refs) {
2103 tctx->cached_refs = 0;
2104 percpu_counter_sub(&tctx->inflight, refs);
2105 put_task_struct_many(task, refs);
2106 }
2107}
2108
d4d19c19 2109static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
54daa9b2 2110 s32 res, u32 cflags)
2b188cc1 2111{
cce4b8b0 2112 struct io_overflow_cqe *ocqe;
2b188cc1 2113
cce4b8b0 2114 ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
08dcd028 2115 trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
cce4b8b0
PB
2116 if (!ocqe) {
2117 /*
2118 * If we're in ring overflow flush mode, or in task cancel mode,
2119 * or cannot allocate an overflow entry, then we need to drop it
2120 * on the floor.
2121 */
8f6ed49a 2122 io_account_cq_overflow(ctx);
155bc950 2123 set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
cce4b8b0 2124 return false;
2b188cc1 2125 }
cce4b8b0 2126 if (list_empty(&ctx->cq_overflow_list)) {
10988a0a 2127 set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
3a4b89a2 2128 atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
20c0b380 2129
cce4b8b0 2130 }
d4d19c19 2131 ocqe->cqe.user_data = user_data;
cce4b8b0
PB
2132 ocqe->cqe.res = res;
2133 ocqe->cqe.flags = cflags;
2134 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
2135 return true;
2b188cc1
JA
2136}
2137
ae4da189 2138static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
913a571a 2139 s32 res, u32 cflags)
2b188cc1
JA
2140{
2141 struct io_uring_cqe *cqe;
2142
2143 /*
2144 * If we can't get a cq entry, userspace overflowed the
2145 * submission (by quite a lot). Increment the overflow count in
2146 * the ring.
2147 */
d068b506 2148 cqe = io_get_cqe(ctx);
1d7bb1d5 2149 if (likely(cqe)) {
d4d19c19 2150 WRITE_ONCE(cqe->user_data, user_data);
2b188cc1 2151 WRITE_ONCE(cqe->res, res);
bcda7baa 2152 WRITE_ONCE(cqe->flags, cflags);
8d13326e 2153 return true;
2b188cc1 2154 }
d4d19c19 2155 return io_cqring_event_overflow(ctx, user_data, res, cflags);
2b188cc1
JA
2156}
2157
90e7c35f
PB
2158static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
2159 struct io_kiocb *req)
2160{
2161 struct io_uring_cqe *cqe;
2162
2163 trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
2164 req->cqe.res, req->cqe.flags);
2165
2166 /*
2167 * If we can't get a cq entry, userspace overflowed the
2168 * submission (by quite a lot). Increment the overflow count in
2169 * the ring.
2170 */
2171 cqe = io_get_cqe(ctx);
2172 if (likely(cqe)) {
2173 memcpy(cqe, &req->cqe, sizeof(*cqe));
2174 return true;
2175 }
2176 return io_cqring_event_overflow(ctx, req->cqe.user_data,
2177 req->cqe.res, req->cqe.flags);
2178}
2179
ae4da189 2180static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
d5ec1dfa 2181{
cef216fc
PB
2182 trace_io_uring_complete(req->ctx, req, req->cqe.user_data, res, cflags);
2183 return __io_fill_cqe(req->ctx, req->cqe.user_data, res, cflags);
d5ec1dfa
SR
2184}
2185
913a571a
PB
2186static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
2187 s32 res, u32 cflags)
bcda7baa 2188{
913a571a 2189 ctx->cq_extra++;
502c87d6 2190 trace_io_uring_complete(ctx, NULL, user_data, res, cflags);
ae4da189 2191 return __io_fill_cqe(ctx, user_data, res, cflags);
bcda7baa
JA
2192}
2193
a37fae8a
HX
2194static void __io_req_complete_post(struct io_kiocb *req, s32 res,
2195 u32 cflags)
2b188cc1 2196{
78e19bbe 2197 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 2198
04c76b41 2199 if (!(req->flags & REQ_F_CQE_SKIP))
ae4da189 2200 __io_fill_cqe_req(req, res, cflags);
c7dae4ba
JA
2201 /*
2202 * If we're the last reference to this request, add to our locked
2203 * free_list cache.
2204 */
de9b4cca 2205 if (req_ref_put_and_test(req)) {
da1a08c5 2206 if (req->flags & IO_REQ_LINK_FLAGS) {
0756a869 2207 if (req->flags & IO_DISARM_MASK)
7a612350
PB
2208 io_disarm_next(req);
2209 if (req->link) {
2210 io_req_task_queue(req->link);
2211 req->link = NULL;
2212 }
2213 }
7ac1edc4 2214 io_req_put_rsrc(req);
8197b053
PB
2215 /*
2216 * Selected buffer deallocation in io_clean_op() assumes that
2217 * we don't hold ->completion_lock. Clean them here to avoid
2218 * deadlocks.
2219 */
2220 io_put_kbuf_comp(req);
c7dae4ba
JA
2221 io_dismantle_req(req);
2222 io_put_task(req->task, 1);
c2b6c6bc 2223 wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
d0acdee2 2224 ctx->locked_free_nr++;
180f829f 2225 }
a37fae8a
HX
2226}
2227
2228static void io_req_complete_post(struct io_kiocb *req, s32 res,
2229 u32 cflags)
2230{
2231 struct io_ring_ctx *ctx = req->ctx;
2232
2233 spin_lock(&ctx->completion_lock);
2234 __io_req_complete_post(req, res, cflags);
7a612350 2235 io_commit_cqring(ctx);
79ebeaee 2236 spin_unlock(&ctx->completion_lock);
a3f34907 2237 io_cqring_ev_posted(ctx);
4e3d9ff9
JA
2238}
2239
54daa9b2
PB
2240static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
2241 u32 cflags)
229a7b63 2242{
cef216fc
PB
2243 req->cqe.res = res;
2244 req->cqe.flags = cflags;
e342c807 2245 req->flags |= REQ_F_COMPLETE_INLINE;
e1e16097
JA
2246}
2247
889fca73 2248static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
54daa9b2 2249 s32 res, u32 cflags)
bcda7baa 2250{
889fca73
PB
2251 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
2252 io_req_complete_state(req, res, cflags);
a38d68db 2253 else
c7dae4ba 2254 io_req_complete_post(req, res, cflags);
bcda7baa
JA
2255}
2256
54daa9b2 2257static inline void io_req_complete(struct io_kiocb *req, s32 res)
0ddf92e8 2258{
889fca73 2259 __io_req_complete(req, 0, res, 0);
0ddf92e8
JA
2260}
2261
54daa9b2 2262static void io_req_complete_failed(struct io_kiocb *req, s32 res)
f41db273 2263{
93d2bcd2 2264 req_set_fail(req);
ab0ac095 2265 io_req_complete_post(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
f41db273
PB
2266}
2267
864ea921
PB
2268/*
2269 * Don't initialise the fields below on every allocation, but do that in
2270 * advance and keep them valid across allocations.
2271 */
2272static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
2273{
2274 req->ctx = ctx;
2275 req->link = NULL;
2276 req->async_data = NULL;
2277 /* not necessary, but safer to zero */
cef216fc 2278 req->cqe.res = 0;
864ea921
PB
2279}
2280
dac7a098 2281static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
cd0ca2e0 2282 struct io_submit_state *state)
dac7a098 2283{
79ebeaee 2284 spin_lock(&ctx->completion_lock);
c2b6c6bc 2285 wq_list_splice(&ctx->locked_free_list, &state->free_list);
d0acdee2 2286 ctx->locked_free_nr = 0;
79ebeaee 2287 spin_unlock(&ctx->completion_lock);
dac7a098
PB
2288}
2289
88ab95be
PB
2290static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
2291{
2292 return !ctx->submit_state.free_list.next;
2293}
2294
5d5901a3
PB
2295/*
2296 * A request might get retired back into the request caches even before opcode
2297 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
2298 * Because of that, io_alloc_req() should be called only under ->uring_lock
2299 * and with extra caution to not get a request that is still being worked on.
2300 */
c072481d 2301static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
5d5901a3 2302 __must_hold(&ctx->uring_lock)
2b188cc1 2303{
864ea921 2304 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
3ab665b7 2305 void *reqs[IO_REQ_ALLOC_BATCH];
864ea921 2306 int ret, i;
e5d1bc0a 2307
23a5c43b
PB
2308 /*
2309 * If we have more than a batch's worth of requests in our IRQ side
2310 * locked cache, grab the lock and move them over to our submission
2311 * side cache.
2312 */
a6d97a8a 2313 if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
23a5c43b 2314 io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
88ab95be 2315 if (!io_req_cache_empty(ctx))
23a5c43b
PB
2316 return true;
2317 }
e5d1bc0a 2318
3ab665b7 2319 ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
fd6fab2c 2320
864ea921
PB
2321 /*
2322 * Bulk alloc is all-or-nothing. If we fail to get a batch,
2323 * retry single alloc to be on the safe side.
2324 */
2325 if (unlikely(ret <= 0)) {
3ab665b7
PB
2326 reqs[0] = kmem_cache_alloc(req_cachep, gfp);
2327 if (!reqs[0])
a33ae9ce 2328 return false;
864ea921 2329 ret = 1;
2b188cc1 2330 }
864ea921 2331
37f0e767 2332 percpu_ref_get_many(&ctx->refs, ret);
3ab665b7 2333 for (i = 0; i < ret; i++) {
23a5c43b 2334 struct io_kiocb *req = reqs[i];
3ab665b7
PB
2335
2336 io_preinit_req(req, ctx);
fa05457a 2337 io_req_add_to_cache(req, ctx);
3ab665b7 2338 }
a33ae9ce
PB
2339 return true;
2340}
2341
2342static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
2343{
88ab95be 2344 if (unlikely(io_req_cache_empty(ctx)))
a33ae9ce
PB
2345 return __io_alloc_req_refill(ctx);
2346 return true;
2347}
2348
2349static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
2350{
2351 struct io_wq_work_node *node;
2352
2353 node = wq_stack_extract(&ctx->submit_state.free_list);
c2b6c6bc 2354 return container_of(node, struct io_kiocb, comp_list);
2b188cc1
JA
2355}
2356
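/*
 * Illustrative sketch, not part of io_uring.c: the "try a bulk allocation,
 * fall back to a single object" pattern from __io_alloc_req_refill() above,
 * written against plain malloc(). The helper and batch size are invented.
 */
#include <stdlib.h>

#define EXAMPLE_ALLOC_BATCH	8

/* returns how many objects were placed in out[], possibly just one */
static int example_refill(void *out[EXAMPLE_ALLOC_BATCH], size_t objsize)
{
	int i;

	for (i = 0; i < EXAMPLE_ALLOC_BATCH; i++) {
		out[i] = malloc(objsize);
		if (!out[i])
			break;
	}
	if (i == 0) {
		/* the batch failed outright, retry a single allocation */
		out[0] = malloc(objsize);
		if (!out[0])
			return 0;
		i = 1;
	}
	return i;
}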
e1d767f0 2357static inline void io_put_file(struct file *file)
8da11c19 2358{
e1d767f0 2359 if (file)
8da11c19
PB
2360 fput(file);
2361}
2362
6b639522 2363static inline void io_dismantle_req(struct io_kiocb *req)
2b188cc1 2364{
094bae49 2365 unsigned int flags = req->flags;
929a3af9 2366
867f8fa5 2367 if (unlikely(flags & IO_REQ_CLEAN_FLAGS))
3a0a6902 2368 io_clean_op(req);
e1d767f0
PB
2369 if (!(flags & REQ_F_FIXED_FILE))
2370 io_put_file(req->file);
e65ef56d
JA
2371}
2372
f5c6cf2a 2373static __cold void io_free_req(struct io_kiocb *req)
c6ca97b3 2374{
51a4cc11 2375 struct io_ring_ctx *ctx = req->ctx;
c6ca97b3 2376
7ac1edc4 2377 io_req_put_rsrc(req);
216578e5 2378 io_dismantle_req(req);
7c660731 2379 io_put_task(req->task, 1);
c6ca97b3 2380
79ebeaee 2381 spin_lock(&ctx->completion_lock);
c2b6c6bc 2382 wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
c34b025f 2383 ctx->locked_free_nr++;
79ebeaee 2384 spin_unlock(&ctx->completion_lock);
e65ef56d
JA
2385}
2386
f2f87370
PB
2387static inline void io_remove_next_linked(struct io_kiocb *req)
2388{
2389 struct io_kiocb *nxt = req->link;
2390
2391 req->link = nxt->link;
2392 nxt->link = NULL;
2393}
2394
81ec803b 2395static struct io_kiocb *io_disarm_linked_timeout(struct io_kiocb *req)
33cc89a9 2396 __must_hold(&req->ctx->completion_lock)
89b263f6 2397 __must_hold(&req->ctx->timeout_lock)
2665abfd 2398{
33cc89a9 2399 struct io_kiocb *link = req->link;
f2f87370 2400
b97e736a 2401 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
c9abd7ad 2402 struct io_timeout_data *io = link->async_data;
7c86ffee 2403
f2f87370 2404 io_remove_next_linked(req);
90cd7e42 2405 link->timeout.head = NULL;
fd9c7bc5 2406 if (hrtimer_try_to_cancel(&io->timer) != -1) {
ef9dd637 2407 list_del(&link->timeout.list);
81ec803b 2408 return link;
c9abd7ad
PB
2409 }
2410 }
81ec803b 2411 return NULL;
7c86ffee
PB
2412}
2413
d148ca4b 2414static void io_fail_links(struct io_kiocb *req)
33cc89a9 2415 __must_hold(&req->ctx->completion_lock)
9e645e11 2416{
33cc89a9 2417 struct io_kiocb *nxt, *link = req->link;
04c76b41 2418 bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;
9e645e11 2419
f2f87370 2420 req->link = NULL;
f2f87370 2421 while (link) {
a8295b98
HX
2422 long res = -ECANCELED;
2423
2424 if (link->flags & REQ_F_FAIL)
cef216fc 2425 res = link->cqe.res;
a8295b98 2426
f2f87370
PB
2427 nxt = link->link;
2428 link->link = NULL;
2665abfd 2429
cef216fc 2430 trace_io_uring_fail_link(req->ctx, req, req->cqe.user_data,
502c87d6
SR
2431 req->opcode, link);
2432
4e118cd9
PB
2433 if (ignore_cqes)
2434 link->flags |= REQ_F_CQE_SKIP;
2435 else
04c76b41 2436 link->flags &= ~REQ_F_CQE_SKIP;
4e118cd9 2437 __io_req_complete_post(link, res, 0);
f2f87370 2438 link = nxt;
9e645e11 2439 }
33cc89a9 2440}
9e645e11 2441
33cc89a9
PB
2442static bool io_disarm_next(struct io_kiocb *req)
2443 __must_hold(&req->ctx->completion_lock)
2444{
81ec803b 2445 struct io_kiocb *link = NULL;
33cc89a9
PB
2446 bool posted = false;
2447
0756a869 2448 if (req->flags & REQ_F_ARM_LTIMEOUT) {
81ec803b 2449 link = req->link;
906c6caa 2450 req->flags &= ~REQ_F_ARM_LTIMEOUT;
0756a869
PB
2451 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
2452 io_remove_next_linked(req);
4e118cd9 2453 io_req_tw_post_queue(link, -ECANCELED, 0);
0756a869
PB
2454 posted = true;
2455 }
2456 } else if (req->flags & REQ_F_LINK_TIMEOUT) {
89b263f6
JA
2457 struct io_ring_ctx *ctx = req->ctx;
2458
2459 spin_lock_irq(&ctx->timeout_lock);
81ec803b 2460 link = io_disarm_linked_timeout(req);
89b263f6 2461 spin_unlock_irq(&ctx->timeout_lock);
81ec803b
PB
2462 if (link) {
2463 posted = true;
2464 io_req_tw_post_queue(link, -ECANCELED, 0);
2465 }
89b263f6 2466 }
93d2bcd2 2467 if (unlikely((req->flags & REQ_F_FAIL) &&
e4335ed3 2468 !(req->flags & REQ_F_HARDLINK))) {
33cc89a9
PB
2469 posted |= (req->link != NULL);
2470 io_fail_links(req);
2471 }
2472 return posted;
9e645e11
JA
2473}
2474
d81499bf
PB
2475static void __io_req_find_next_prep(struct io_kiocb *req)
2476{
2477 struct io_ring_ctx *ctx = req->ctx;
2478 bool posted;
2479
2480 spin_lock(&ctx->completion_lock);
2481 posted = io_disarm_next(req);
60053be8 2482 io_commit_cqring(ctx);
d81499bf
PB
2483 spin_unlock(&ctx->completion_lock);
2484 if (posted)
2485 io_cqring_ev_posted(ctx);
2486}
2487
2488static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
c69f8dbe 2489{
33cc89a9 2490 struct io_kiocb *nxt;
944e58bf 2491
9e645e11
JA
2492 /*
2493 * If LINK is set, we have dependent requests in this chain. If we
2494 * didn't fail this request, queue the first one up, moving any other
2495 * dependencies to the next request. In case of failure, fail the rest
2496 * of the chain.
2497 */
d81499bf
PB
2498 if (unlikely(req->flags & IO_DISARM_MASK))
2499 __io_req_find_next_prep(req);
33cc89a9
PB
2500 nxt = req->link;
2501 req->link = NULL;
2502 return nxt;
4d7dd462 2503}
9e645e11 2504
f237c30a 2505static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
2c32395d
PB
2506{
2507 if (!ctx)
2508 return;
f237c30a 2509 if (*locked) {
c450178d 2510 io_submit_flush_completions(ctx);
2c32395d 2511 mutex_unlock(&ctx->uring_lock);
f237c30a 2512 *locked = false;
2c32395d
PB
2513 }
2514 percpu_ref_put(&ctx->refs);
2515}
2516
f28c240e
HX
2517static inline void ctx_commit_and_unlock(struct io_ring_ctx *ctx)
2518{
2519 io_commit_cqring(ctx);
2520 spin_unlock(&ctx->completion_lock);
2521 io_cqring_ev_posted(ctx);
2522}
2523
2524static void handle_prev_tw_list(struct io_wq_work_node *node,
2525 struct io_ring_ctx **ctx, bool *uring_locked)
2526{
2527 if (*ctx && !*uring_locked)
2528 spin_lock(&(*ctx)->completion_lock);
2529
2530 do {
2531 struct io_wq_work_node *next = node->next;
2532 struct io_kiocb *req = container_of(node, struct io_kiocb,
2533 io_task_work.node);
2534
34d2bfe7
JA
2535 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
2536
f28c240e
HX
2537 if (req->ctx != *ctx) {
2538 if (unlikely(!*uring_locked && *ctx))
2539 ctx_commit_and_unlock(*ctx);
2540
2541 ctx_flush_and_put(*ctx, uring_locked);
2542 *ctx = req->ctx;
2543 /* if not contended, grab and improve batching */
2544 *uring_locked = mutex_trylock(&(*ctx)->uring_lock);
2545 percpu_ref_get(&(*ctx)->refs);
2546 if (unlikely(!*uring_locked))
2547 spin_lock(&(*ctx)->completion_lock);
2548 }
2549 if (likely(*uring_locked))
2550 req->io_task_work.func(req, uring_locked);
2551 else
cef216fc 2552 __io_req_complete_post(req, req->cqe.res,
cc3cec83 2553 io_put_kbuf_comp(req));
f28c240e
HX
2554 node = next;
2555 } while (node);
2556
2557 if (unlikely(!*uring_locked))
2558 ctx_commit_and_unlock(*ctx);
2559}
2560
2561static void handle_tw_list(struct io_wq_work_node *node,
2562 struct io_ring_ctx **ctx, bool *locked)
9f8d032a
HX
2563{
2564 do {
2565 struct io_wq_work_node *next = node->next;
2566 struct io_kiocb *req = container_of(node, struct io_kiocb,
2567 io_task_work.node);
2568
34d2bfe7
JA
2569 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
2570
9f8d032a
HX
2571 if (req->ctx != *ctx) {
2572 ctx_flush_and_put(*ctx, locked);
2573 *ctx = req->ctx;
2574 /* if not contended, grab and improve batching */
2575 *locked = mutex_trylock(&(*ctx)->uring_lock);
2576 percpu_ref_get(&(*ctx)->refs);
2577 }
2578 req->io_task_work.func(req, locked);
2579 node = next;
2580 } while (node);
2581}
2582
7cbf1722 2583static void tctx_task_work(struct callback_head *cb)
c40f6379 2584{
f28c240e 2585 bool uring_locked = false;
ebd0df2e 2586 struct io_ring_ctx *ctx = NULL;
3f18407d
PB
2587 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
2588 task_work);
c40f6379 2589
16f72070 2590 while (1) {
f28c240e 2591 struct io_wq_work_node *node1, *node2;
3f18407d
PB
2592
2593 spin_lock_irq(&tctx->task_lock);
f28c240e
HX
2594 node1 = tctx->prior_task_list.first;
2595 node2 = tctx->task_list.first;
3f18407d 2596 INIT_WQ_LIST(&tctx->task_list);
f28c240e
HX
2597 INIT_WQ_LIST(&tctx->prior_task_list);
2598 if (!node2 && !node1)
6294f368 2599 tctx->task_running = false;
3f18407d 2600 spin_unlock_irq(&tctx->task_lock);
f28c240e 2601 if (!node2 && !node1)
6294f368 2602 break;
3f18407d 2603
f28c240e
HX
2604 if (node1)
2605 handle_prev_tw_list(node1, &ctx, &uring_locked);
f28c240e
HX
2606 if (node2)
2607 handle_tw_list(node2, &ctx, &uring_locked);
7cbf1722 2608 cond_resched();
68ca8fc0 2609
a6d97a8a
PB
2610 if (data_race(!tctx->task_list.first) &&
2611 data_race(!tctx->prior_task_list.first) && uring_locked)
68ca8fc0 2612 io_submit_flush_completions(ctx);
3f18407d 2613 }
ebd0df2e 2614
f28c240e 2615 ctx_flush_and_put(ctx, &uring_locked);
3cc7fdb9
PB
2616
2617 /* relaxed read is enough as only the task itself sets ->in_idle */
2618 if (unlikely(atomic_read(&tctx->in_idle)))
2619 io_uring_drop_tctx_refs(current);
7cbf1722
JA
2620}
2621
4813c377 2622static void io_req_task_work_add(struct io_kiocb *req, bool priority)
7cbf1722 2623{
c15b79de 2624 struct task_struct *tsk = req->task;
9f010507 2625 struct io_ring_ctx *ctx = req->ctx;
7cbf1722 2626 struct io_uring_task *tctx = tsk->io_uring;
e09ee510 2627 struct io_wq_work_node *node;
0b81e80c 2628 unsigned long flags;
6294f368 2629 bool running;
7cbf1722
JA
2630
2631 WARN_ON_ONCE(!tctx);
2632
d5361233
JA
2633 io_drop_inflight_file(req);
2634
0b81e80c 2635 spin_lock_irqsave(&tctx->task_lock, flags);
4813c377
HX
2636 if (priority)
2637 wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
2638 else
2639 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
6294f368
PB
2640 running = tctx->task_running;
2641 if (!running)
2642 tctx->task_running = true;
0b81e80c 2643 spin_unlock_irqrestore(&tctx->task_lock, flags);
7cbf1722
JA
2644
2645 /* task_work already pending, we're done */
6294f368 2646 if (running)
e09ee510 2647 return;
7cbf1722 2648
9f010507 2649 if (likely(!task_work_add(tsk, &tctx->task_work, ctx->notify_method)))
e09ee510 2650 return;
2215bed9 2651
0b81e80c 2652 spin_lock_irqsave(&tctx->task_lock, flags);
6294f368 2653 tctx->task_running = false;
4813c377 2654 node = wq_list_merge(&tctx->prior_task_list, &tctx->task_list);
0b81e80c 2655 spin_unlock_irqrestore(&tctx->task_lock, flags);
7cbf1722 2656
e09ee510
PB
2657 while (node) {
2658 req = container_of(node, struct io_kiocb, io_task_work.node);
2659 node = node->next;
2660 if (llist_add(&req->io_task_work.fallback_node,
2661 &req->ctx->fallback_llist))
2662 schedule_delayed_work(&req->ctx->fallback_work, 1);
2663 }
eab30c4d
PB
2664}
2665
4e118cd9
PB
2666static void io_req_tw_post(struct io_kiocb *req, bool *locked)
2667{
2668 io_req_complete_post(req, req->cqe.res, req->cqe.flags);
2669}
2670
2671static void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags)
2672{
2673 req->cqe.res = res;
2674 req->cqe.flags = cflags;
2675 req->io_task_work.func = io_req_tw_post;
2676 io_req_task_work_add(req, false);
2677}
2678
f237c30a 2679static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
c40f6379 2680{
b18a1a45 2681 /* not needed for normal modes, but SQPOLL depends on it */
971cf9c1 2682 io_tw_lock(req->ctx, locked);
cef216fc 2683 io_req_complete_failed(req, req->cqe.res);
c40f6379
JA
2684}
2685
f237c30a 2686static void io_req_task_submit(struct io_kiocb *req, bool *locked)
c40f6379 2687{
971cf9c1 2688 io_tw_lock(req->ctx, locked);
316319e8 2689 /* req->task == current here, checking PF_EXITING is safe */
af066f31 2690 if (likely(!(req->task->flags & PF_EXITING)))
cbc2e203 2691 io_queue_sqe(req);
81b6d05c 2692 else
2593553a 2693 io_req_complete_failed(req, -EFAULT);
c40f6379
JA
2694}
2695
2c4b8eb6 2696static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
c40f6379 2697{
cef216fc 2698 req->cqe.res = ret;
5b0a6acc 2699 req->io_task_work.func = io_req_task_cancel;
4813c377 2700 io_req_task_work_add(req, false);
c40f6379
JA
2701}
2702
2c4b8eb6 2703static void io_req_task_queue(struct io_kiocb *req)
a3df7698 2704{
5b0a6acc 2705 req->io_task_work.func = io_req_task_submit;
4813c377 2706 io_req_task_work_add(req, false);
a3df7698
PB
2707}
2708
773af691
JA
2709static void io_req_task_queue_reissue(struct io_kiocb *req)
2710{
77955efb 2711 req->io_task_work.func = io_queue_iowq;
4813c377 2712 io_req_task_work_add(req, false);
773af691
JA
2713}
2714
57859f4d 2715static void io_queue_next(struct io_kiocb *req)
c69f8dbe 2716{
57859f4d 2717 struct io_kiocb *nxt = io_req_find_next(req);
944e58bf 2718
57859f4d
PB
2719 if (nxt)
2720 io_req_task_queue(nxt);
c69f8dbe
JL
2721}
2722
3aa83bfb 2723static void io_free_batch_list(struct io_ring_ctx *ctx,
1cce17ac 2724 struct io_wq_work_node *node)
3aa83bfb 2725 __must_hold(&ctx->uring_lock)
5af1d13e 2726{
d4b7a5ef 2727 struct task_struct *task = NULL;
37f0e767 2728 int task_refs = 0;
5af1d13e 2729
3aa83bfb
PB
2730 do {
2731 struct io_kiocb *req = container_of(node, struct io_kiocb,
2732 comp_list);
2d6500d4 2733
a538be5b
PB
2734 if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
2735 if (req->flags & REQ_F_REFCOUNT) {
2736 node = req->comp_list.next;
2737 if (!req_ref_put_and_test(req))
2738 continue;
2739 }
b605a7fa
PB
2740 if ((req->flags & REQ_F_POLLED) && req->apoll) {
2741 struct async_poll *apoll = req->apoll;
2742
2743 if (apoll->double_poll)
2744 kfree(apoll->double_poll);
2745 list_add(&apoll->poll.wait.entry,
2746 &ctx->apoll_cache);
2747 req->flags &= ~REQ_F_POLLED;
2748 }
da1a08c5 2749 if (req->flags & IO_REQ_LINK_FLAGS)
57859f4d 2750 io_queue_next(req);
a538be5b
PB
2751 if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
2752 io_clean_op(req);
c1e53a69 2753 }
a538be5b
PB
2754 if (!(req->flags & REQ_F_FIXED_FILE))
2755 io_put_file(req->file);
2d6500d4 2756
ab409402 2757 io_req_put_rsrc_locked(req, ctx);
5af1d13e 2758
d4b7a5ef
PB
2759 if (req->task != task) {
2760 if (task)
2761 io_put_task(task, task_refs);
2762 task = req->task;
2763 task_refs = 0;
2764 }
2765 task_refs++;
c1e53a69 2766 node = req->comp_list.next;
fa05457a 2767 io_req_add_to_cache(req, ctx);
3aa83bfb 2768 } while (node);
d4b7a5ef 2769
d4b7a5ef
PB
2770 if (task)
2771 io_put_task(task, task_refs);
7a743e22
PB
2772}
2773
c450178d 2774static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
a141dd89 2775 __must_hold(&ctx->uring_lock)
905c172f 2776{
6f33b0bc 2777 struct io_wq_work_node *node, *prev;
cd0ca2e0 2778 struct io_submit_state *state = &ctx->submit_state;
905c172f 2779
3d4aeb9f
PB
2780 if (state->flush_cqes) {
2781 spin_lock(&ctx->completion_lock);
2782 wq_list_for_each(node, prev, &state->compl_reqs) {
2783 struct io_kiocb *req = container_of(node, struct io_kiocb,
6f33b0bc 2784 comp_list);
5182ed2e 2785
3d4aeb9f 2786 if (!(req->flags & REQ_F_CQE_SKIP))
90e7c35f 2787 __io_fill_cqe_req_filled(ctx, req);
3d4aeb9f
PB
2788 }
2789
2790 io_commit_cqring(ctx);
2791 spin_unlock(&ctx->completion_lock);
2792 io_cqring_ev_posted(ctx);
2793 state->flush_cqes = false;
905c172f 2794 }
5182ed2e 2795
1cce17ac 2796 io_free_batch_list(ctx, state->compl_reqs.first);
6f33b0bc 2797 INIT_WQ_LIST(&state->compl_reqs);
7a743e22
PB
2798}
2799
ba816ad6
JA
2800/*
2801 * Drop reference to request, return next in chain (if there is one) if this
2802 * was the last reference to this request.
2803 */
0d85035a 2804static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
e65ef56d 2805{
9b5f7bd9
PB
2806 struct io_kiocb *nxt = NULL;
2807
de9b4cca 2808 if (req_ref_put_and_test(req)) {
da1a08c5 2809 if (unlikely(req->flags & IO_REQ_LINK_FLAGS))
7819a1f6 2810 nxt = io_req_find_next(req);
f5c6cf2a 2811 io_free_req(req);
2a44f467 2812 }
9b5f7bd9 2813 return nxt;
2b188cc1
JA
2814}
2815
0d85035a 2816static inline void io_put_req(struct io_kiocb *req)
e65ef56d 2817{
f5c6cf2a
PB
2818 if (req_ref_put_and_test(req)) {
2819 io_queue_next(req);
e65ef56d 2820 io_free_req(req);
f5c6cf2a 2821 }
2b188cc1
JA
2822}
2823
6c503150 2824static unsigned io_cqring_events(struct io_ring_ctx *ctx)
a3a0e43f
JA
2825{
2826 /* See comment at the top of this file */
2827 smp_rmb();
e23de15f 2828 return __io_cqring_events(ctx);
a3a0e43f
JA
2829}
2830
fb5ccc98
PB
2831static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2832{
2833 struct io_rings *rings = ctx->rings;
2834
2835 /* make sure SQ entry isn't read before tail */
2836 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2837}
2838
4c6e277c
JA
2839static inline bool io_run_task_work(void)
2840{
7f62d40d 2841 if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) {
4c6e277c 2842 __set_current_state(TASK_RUNNING);
7c5d8fa6
EB
2843 clear_notify_signal();
2844 if (task_work_pending(current))
2845 task_work_run();
4c6e277c
JA
2846 return true;
2847 }
2848
2849 return false;
bcda7baa
JA
2850}
2851
5ba3c874 2852static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
def596e9 2853{
5eef4e87 2854 struct io_wq_work_node *pos, *start, *prev;
d729cf9a 2855 unsigned int poll_flags = BLK_POLL_NOSLEEP;
b688f11e 2856 DEFINE_IO_COMP_BATCH(iob);
5ba3c874 2857 int nr_events = 0;
def596e9
JA
2858
2859 /*
2860 * Only spin for completions if we don't have multiple devices hanging
87a115fb 2861 * off our complete list.
def596e9 2862 */
87a115fb 2863 if (ctx->poll_multi_queue || force_nonspin)
ef99b2d3 2864 poll_flags |= BLK_POLL_ONESHOT;
def596e9 2865
5eef4e87
PB
2866 wq_list_for_each(pos, start, &ctx->iopoll_list) {
2867 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
9adbd45d 2868 struct kiocb *kiocb = &req->rw.kiocb;
a2416e1e 2869 int ret;
def596e9
JA
2870
2871 /*
581f9810
BM
2872 * Move completed and retryable entries to our local lists.
2873 * If we find a request that requires polling, break out
2874 * and complete those lists first, if we have entries there.
def596e9 2875 */
e3f721e6 2876 if (READ_ONCE(req->iopoll_completed))
def596e9
JA
2877 break;
2878
b688f11e 2879 ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags);
a2416e1e
PB
2880 if (unlikely(ret < 0))
2881 return ret;
2882 else if (ret)
ef99b2d3 2883 poll_flags |= BLK_POLL_ONESHOT;
def596e9 2884
3aadc23e 2885 /* iopoll may have completed current req */
b688f11e
JA
2886 if (!rq_list_empty(iob.req_list) ||
2887 READ_ONCE(req->iopoll_completed))
e3f721e6 2888 break;
def596e9
JA
2889 }
2890
b688f11e
JA
2891 if (!rq_list_empty(iob.req_list))
2892 iob.complete(&iob);
5eef4e87
PB
2893 else if (!pos)
2894 return 0;
def596e9 2895
5eef4e87
PB
2896 prev = start;
2897 wq_list_for_each_resume(pos, prev) {
2898 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
2899
b3fa03fd
PB
2900 /* order with io_complete_rw_iopoll(), e.g. ->result updates */
2901 if (!smp_load_acquire(&req->iopoll_completed))
e3f721e6 2902 break;
c0713540 2903 nr_events++;
83a13a41
PB
2904 if (unlikely(req->flags & REQ_F_CQE_SKIP))
2905 continue;
cef216fc 2906 __io_fill_cqe_req(req, req->cqe.res, io_put_kbuf(req, 0));
e3f721e6 2907 }
def596e9 2908
f5ed3bcd
PB
2909 if (unlikely(!nr_events))
2910 return 0;
2911
2912 io_commit_cqring(ctx);
2913 io_cqring_ev_posted_iopoll(ctx);
1cce17ac 2914 pos = start ? start->next : ctx->iopoll_list.first;
5eef4e87 2915 wq_list_cut(&ctx->iopoll_list, prev, start);
1cce17ac 2916 io_free_batch_list(ctx, pos);
5ba3c874 2917 return nr_events;
def596e9
JA
2918}
2919
def596e9
JA
2920/*
2921 * We can't just wait for polled events to come to us, we have to actively
2922 * find and complete them.
2923 */
c072481d 2924static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
def596e9
JA
2925{
2926 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2927 return;
2928
2929 mutex_lock(&ctx->uring_lock);
5eef4e87 2930 while (!wq_list_empty(&ctx->iopoll_list)) {
b2edc0a7 2931 /* let it sleep and repeat later if can't complete a request */
5ba3c874 2932 if (io_do_iopoll(ctx, true) == 0)
b2edc0a7 2933 break;
08f5439f
JA
2934 /*
2935 * Ensure we allow local-to-the-cpu processing to take place;
2936 * in this case we need to ensure that we reap all events.
3fcee5a6 2937 * Also let task_work, etc., progress by releasing the mutex
08f5439f 2938 */
3fcee5a6
PB
2939 if (need_resched()) {
2940 mutex_unlock(&ctx->uring_lock);
2941 cond_resched();
2942 mutex_lock(&ctx->uring_lock);
2943 }
def596e9
JA
2944 }
2945 mutex_unlock(&ctx->uring_lock);
2946}
2947
7668b92a 2948static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
def596e9 2949{
7668b92a 2950 unsigned int nr_events = 0;
e9979b36 2951 int ret = 0;
155bc950 2952 unsigned long check_cq;
500f9fba 2953
f39c8a5b
PB
2954 /*
2955 * Don't enter poll loop if we already have events pending.
2956 * If we do, we can potentially be spinning for commands that
2957 * already triggered a CQE (e.g. in error).
2958 */
155bc950
DY
2959 check_cq = READ_ONCE(ctx->check_cq);
2960 if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
f39c8a5b
PB
2961 __io_cqring_overflow_flush(ctx, false);
2962 if (io_cqring_events(ctx))
d487b43c 2963 return 0;
155bc950
DY
2964
2965 /*
2966 * Similarly do not spin if we have not informed the user of any
2967 * dropped CQE.
2968 */
2969 if (unlikely(check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)))
2970 return -EBADR;
2971
def596e9 2972 do {
500f9fba
JA
2973 /*
2974 * If a submit got punted to a workqueue, we can have the
2975 * application entering polling for a command before it gets
2976 * issued. That app will hold the uring_lock for the duration
2977 * of the poll right here, so we need to take a breather every
2978 * now and then to ensure that the issue has a chance to add
2979 * the poll to the issued list. Otherwise we can spin here
2980 * forever, while the workqueue is stuck trying to acquire the
2981 * very same mutex.
2982 */
5eef4e87 2983 if (wq_list_empty(&ctx->iopoll_list)) {
8f487ef2
PB
2984 u32 tail = ctx->cached_cq_tail;
2985
500f9fba 2986 mutex_unlock(&ctx->uring_lock);
4c6e277c 2987 io_run_task_work();
500f9fba 2988 mutex_lock(&ctx->uring_lock);
def596e9 2989
8f487ef2
PB
2990 /* some requests don't go through iopoll_list */
2991 if (tail != ctx->cached_cq_tail ||
5eef4e87 2992 wq_list_empty(&ctx->iopoll_list))
e9979b36 2993 break;
500f9fba 2994 }
5ba3c874
PB
2995 ret = io_do_iopoll(ctx, !min);
2996 if (ret < 0)
2997 break;
2998 nr_events += ret;
2999 ret = 0;
3000 } while (nr_events < min && !need_resched());
d487b43c 3001
def596e9
JA
3002 return ret;
3003}
3004
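/*
 * Illustrative sketch, not part of io_uring.c: the "breather" from
 * io_iopoll_check() above. When the poll list is empty, the lock is
 * dropped so a punted submission gets a chance to add its request,
 * then retaken before polling again. pthread mutexes stand in for
 * ->uring_lock and all callbacks are invented for the example.
 */
#include <pthread.h>
#include <stdbool.h>

static void example_poll_loop(pthread_mutex_t *lock,
			      bool (*list_empty)(void),
			      void (*run_pending_work)(void),
			      bool (*poll_once)(void))
{
	pthread_mutex_lock(lock);
	do {
		if (list_empty()) {
			pthread_mutex_unlock(lock);
			run_pending_work();	/* let the submitter add its poll */
			pthread_mutex_lock(lock);
			if (list_empty())
				break;		/* nothing was added, give up */
		}
	} while (poll_once());
	pthread_mutex_unlock(lock);
}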
491381ce 3005static void kiocb_end_write(struct io_kiocb *req)
2b188cc1 3006{
491381ce
JA
3007 /*
3008 * Tell lockdep we inherited freeze protection from submission
3009 * thread.
3010 */
3011 if (req->flags & REQ_F_ISREG) {
1c98679d 3012 struct super_block *sb = file_inode(req->file)->i_sb;
2b188cc1 3013
1c98679d
PB
3014 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
3015 sb_end_write(sb);
2b188cc1
JA
3016 }
3017}
3018
b63534c4 3019#ifdef CONFIG_BLOCK
dc2a6e9a 3020static bool io_resubmit_prep(struct io_kiocb *req)
b63534c4 3021{
ab454438 3022 struct io_async_rw *rw = req->async_data;
b63534c4 3023
d886e185 3024 if (!req_has_async_data(req))
ab454438 3025 return !io_req_prep_async(req);
538941e2 3026 iov_iter_restore(&rw->s.iter, &rw->s.iter_state);
ab454438 3027 return true;
b63534c4 3028}
b63534c4 3029
3e6a0d3c 3030static bool io_rw_should_reissue(struct io_kiocb *req)
b63534c4 3031{
355afaeb 3032 umode_t mode = file_inode(req->file)->i_mode;
3e6a0d3c 3033 struct io_ring_ctx *ctx = req->ctx;
b63534c4 3034
355afaeb
JA
3035 if (!S_ISBLK(mode) && !S_ISREG(mode))
3036 return false;
3e6a0d3c
JA
3037 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
3038 !(ctx->flags & IORING_SETUP_IOPOLL)))
b63534c4 3039 return false;
7c977a58
JA
3040 /*
3041 * If ref is dying, we might be running poll reap from the exit work.
3042 * Don't attempt to reissue from that path, just let it fail with
3043 * -EAGAIN.
3044 */
3e6a0d3c
JA
3045 if (percpu_ref_is_dying(&ctx->refs))
3046 return false;
ef046888
JA
3047 /*
3048 * Play it safe and assume it's not safe to re-import and reissue if we're
3049 * not in the original thread group (or in task context).
3050 */
3051 if (!same_thread_group(req->task, current) || !in_task())
3052 return false;
3e6a0d3c
JA
3053 return true;
3054}
e82ad485 3055#else
a1ff1e3f 3056static bool io_resubmit_prep(struct io_kiocb *req)
e82ad485
JA
3057{
3058 return false;
3059}
e82ad485 3060static bool io_rw_should_reissue(struct io_kiocb *req)
3e6a0d3c 3061{
b63534c4
JA
3062 return false;
3063}
3e6a0d3c 3064#endif
b63534c4 3065
8ef12efe 3066static bool __io_complete_rw_common(struct io_kiocb *req, long res)
a1d7c393 3067{
f63cf519 3068 if (req->rw.kiocb.ki_flags & IOCB_WRITE) {
b65c128f 3069 kiocb_end_write(req);
f63cf519
JA
3070 fsnotify_modify(req->file);
3071 } else {
3072 fsnotify_access(req->file);
3073 }
cef216fc 3074 if (unlikely(res != req->cqe.res)) {
9532b99b
PB
3075 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
3076 io_rw_should_reissue(req)) {
3077 req->flags |= REQ_F_REISSUE;
8ef12efe 3078 return true;
9532b99b 3079 }
93d2bcd2 3080 req_set_fail(req);
cef216fc 3081 req->cqe.res = res;
9532b99b 3082 }
8ef12efe
JA
3083 return false;
3084}
3085
cc8e9ba7 3086static inline void io_req_task_complete(struct io_kiocb *req, bool *locked)
8ef12efe 3087{
cef216fc 3088 int res = req->cqe.res;
126180b9
PB
3089
3090 if (*locked) {
cc3cec83 3091 io_req_complete_state(req, res, io_put_kbuf(req, 0));
fff4e40e 3092 io_req_add_compl_list(req);
126180b9 3093 } else {
cc3cec83
JA
3094 io_req_complete_post(req, res,
3095 io_put_kbuf(req, IO_URING_F_UNLOCKED));
126180b9 3096 }
8ef12efe
JA
3097}
3098
00f6e68b 3099static void __io_complete_rw(struct io_kiocb *req, long res,
8ef12efe
JA
3100 unsigned int issue_flags)
3101{
3102 if (__io_complete_rw_common(req, res))
3103 return;
cef216fc 3104 __io_req_complete(req, issue_flags, req->cqe.res,
cc3cec83 3105 io_put_kbuf(req, issue_flags));
ba816ad6
JA
3106}
3107
6b19b766 3108static void io_complete_rw(struct kiocb *kiocb, long res)
ba816ad6 3109{
9adbd45d 3110 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
ba816ad6 3111
8ef12efe
JA
3112 if (__io_complete_rw_common(req, res))
3113 return;
cef216fc 3114 req->cqe.res = res;
8ef12efe 3115 req->io_task_work.func = io_req_task_complete;
f28c240e 3116 io_req_task_work_add(req, !!(req->ctx->flags & IORING_SETUP_SQPOLL));
2b188cc1
JA
3117}
3118
6b19b766 3119static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
def596e9 3120{
9adbd45d 3121 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
def596e9 3122
491381ce
JA
3123 if (kiocb->ki_flags & IOCB_WRITE)
3124 kiocb_end_write(req);
cef216fc 3125 if (unlikely(res != req->cqe.res)) {
b66ceaf3
PB
3126 if (res == -EAGAIN && io_rw_should_reissue(req)) {
3127 req->flags |= REQ_F_REISSUE;
3128 return;
9532b99b 3129 }
cef216fc 3130 req->cqe.res = res;
8c130827 3131 }
bbde017a 3132
b3fa03fd
PB
3133 /* order with io_iopoll_complete() checking ->iopoll_completed */
3134 smp_store_release(&req->iopoll_completed, 1);
def596e9
JA
3135}
3136
3137/*
3138 * After the iocb has been issued, it's safe to be found on the poll list.
3139 * Adding the kiocb to the list AFTER submission ensures that we don't
f39c8a5b 3140 * find it from a io_do_iopoll() thread before the issuer is done
def596e9
JA
3141 * accessing the kiocb cookie.
3142 */
9882131c 3143static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
def596e9
JA
3144{
3145 struct io_ring_ctx *ctx = req->ctx;
3b44b371 3146 const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
cb3d8972
PB
3147
3148 /* workqueue context doesn't hold uring_lock, grab it now */
3b44b371 3149 if (unlikely(needs_lock))
cb3d8972 3150 mutex_lock(&ctx->uring_lock);
def596e9
JA
3151
3152 /*
3153 * Track whether we have multiple files in our lists. This will impact
3154 * how we do polling eventually, not spinning if we're on potentially
3155 * different devices.
3156 */
5eef4e87 3157 if (wq_list_empty(&ctx->iopoll_list)) {
915b3dde
HX
3158 ctx->poll_multi_queue = false;
3159 } else if (!ctx->poll_multi_queue) {
def596e9
JA
3160 struct io_kiocb *list_req;
3161
5eef4e87
PB
3162 list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
3163 comp_list);
30da1b45 3164 if (list_req->file != req->file)
915b3dde 3165 ctx->poll_multi_queue = true;
def596e9
JA
3166 }
3167
3168 /*
3169 * For fast devices, IO may have already completed. If it has, add
3170 * it to the front so we find it first.
3171 */
65a6543d 3172 if (READ_ONCE(req->iopoll_completed))
5eef4e87 3173 wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
def596e9 3174 else
5eef4e87 3175 wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
bdcd3eab 3176
3b44b371 3177 if (unlikely(needs_lock)) {
cb3d8972
PB
3178 /*
3179 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
3180 * in sq thread task context or in io worker task context. If
3181 * the current task context is the sq thread, we don't need to check
3182 * whether we should wake up the sq thread.
3183 */
3184 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
3185 wq_has_sleeper(&ctx->sq_data->wait))
3186 wake_up(&ctx->sq_data->wait);
3187
3188 mutex_unlock(&ctx->uring_lock);
3189 }
def596e9
JA
3190}
3191
4503b767
JA
3192static bool io_bdev_nowait(struct block_device *bdev)
3193{
9ba0d0c8 3194 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
4503b767
JA
3195}
3196
2b188cc1
JA
3197/*
3198 * If we tracked the file through the SCM inflight mechanism, we could support
3199 * any file. For now, just ensure that anything potentially problematic is done
3200 * inline.
3201 */
88459b50 3202static bool __io_file_supports_nowait(struct file *file, umode_t mode)
2b188cc1 3203{
4503b767 3204 if (S_ISBLK(mode)) {
4e7b5671
CH
3205 if (IS_ENABLED(CONFIG_BLOCK) &&
3206 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
4503b767
JA
3207 return true;
3208 return false;
3209 }
976517f1 3210 if (S_ISSOCK(mode))
2b188cc1 3211 return true;
4503b767 3212 if (S_ISREG(mode)) {
4e7b5671
CH
3213 if (IS_ENABLED(CONFIG_BLOCK) &&
3214 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
4503b767
JA
3215 file->f_op != &io_uring_fops)
3216 return true;
3217 return false;
3218 }
2b188cc1 3219
c5b85625
JA
3220 /* any ->read/write should understand O_NONBLOCK */
3221 if (file->f_flags & O_NONBLOCK)
3222 return true;
35645ac3 3223 return file->f_mode & FMODE_NOWAIT;
2b188cc1 3224}
c5b85625 3225
88459b50
PB
3226/*
3227 * If we tracked the file through the SCM inflight mechanism, we could support
3228 * any file. For now, just ensure that anything potentially problematic is done
3229 * inline.
3230 */
3231static unsigned int io_file_get_flags(struct file *file)
3232{
3233 umode_t mode = file_inode(file)->i_mode;
3234 unsigned int res = 0;
af197f50 3235
88459b50
PB
3236 if (S_ISREG(mode))
3237 res |= FFS_ISREG;
3238 if (__io_file_supports_nowait(file, mode))
3239 res |= FFS_NOWAIT;
5e45690a
JA
3240 if (io_file_need_scm(file))
3241 res |= FFS_SCM;
88459b50 3242 return res;
2b188cc1
JA
3243}
3244
35645ac3 3245static inline bool io_file_supports_nowait(struct io_kiocb *req)
7b29f92d 3246{
88459b50 3247 return req->flags & REQ_F_SUPPORT_NOWAIT;
7b29f92d
JA
3248}
3249
b9a6b8f9 3250static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2b188cc1 3251{
9adbd45d 3252 struct kiocb *kiocb = &req->rw.kiocb;
09bb8394
JA
3253 unsigned ioprio;
3254 int ret;
2b188cc1 3255
2b188cc1 3256 kiocb->ki_pos = READ_ONCE(sqe->off);
9adbd45d 3257
fb27274a
PB
3258 ioprio = READ_ONCE(sqe->ioprio);
3259 if (ioprio) {
3260 ret = ioprio_check_cap(ioprio);
3261 if (ret)
3262 return ret;
3263
3264 kiocb->ki_ioprio = ioprio;
3265 } else {
3266 kiocb->ki_ioprio = get_current_ioprio();
eae071c9
PB
3267 }
3268
578c0ee2 3269 req->imu = NULL;
3529d8c2
JA
3270 req->rw.addr = READ_ONCE(sqe->addr);
3271 req->rw.len = READ_ONCE(sqe->len);
584b0180 3272 req->rw.flags = READ_ONCE(sqe->rw_flags);
4f4eeba8 3273 req->buf_index = READ_ONCE(sqe->buf_index);
2b188cc1 3274 return 0;
2b188cc1
JA
3275}
3276
3277static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
3278{
3279 switch (ret) {
3280 case -EIOCBQUEUED:
3281 break;
3282 case -ERESTARTSYS:
3283 case -ERESTARTNOINTR:
3284 case -ERESTARTNOHAND:
3285 case -ERESTART_RESTARTBLOCK:
3286 /*
3287 * We can't just restart the syscall, since previously
3288 * submitted sqes may already be in progress. Just fail this
3289 * IO with EINTR.
3290 */
3291 ret = -EINTR;
df561f66 3292 fallthrough;
2b188cc1 3293 default:
6b19b766 3294 kiocb->ki_complete(kiocb, ret);
2b188cc1
JA
3295 }
3296}
3297
b4aec400 3298static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
d34e1e5b
DY
3299{
3300 struct kiocb *kiocb = &req->rw.kiocb;
3301
6f83ab22
JA
3302 if (kiocb->ki_pos != -1)
3303 return &kiocb->ki_pos;
3304
3305 if (!(req->file->f_mode & FMODE_STREAM)) {
3306 req->flags |= REQ_F_CUR_POS;
3307 kiocb->ki_pos = req->file->f_pos;
3308 return &kiocb->ki_pos;
d34e1e5b 3309 }
6f83ab22
JA
3310
3311 kiocb->ki_pos = 0;
3312 return NULL;
d34e1e5b
DY
3313}
3314
2ea537ca 3315static void kiocb_done(struct io_kiocb *req, ssize_t ret,
889fca73 3316 unsigned int issue_flags)
ba816ad6 3317{
e8c2bc1f 3318 struct io_async_rw *io = req->async_data;
ba04291e 3319
227c0c96 3320 /* add previously done IO, if any */
d886e185 3321 if (req_has_async_data(req) && io->bytes_done > 0) {
227c0c96 3322 if (ret < 0)
e8c2bc1f 3323 ret = io->bytes_done;
227c0c96 3324 else
e8c2bc1f 3325 ret += io->bytes_done;
227c0c96
JA
3326 }
3327
ba04291e 3328 if (req->flags & REQ_F_CUR_POS)
2ea537ca
PB
3329 req->file->f_pos = req->rw.kiocb.ki_pos;
3330 if (ret >= 0 && (req->rw.kiocb.ki_complete == io_complete_rw))
00f6e68b 3331 __io_complete_rw(req, ret, issue_flags);
ba816ad6 3332 else
2ea537ca 3333 io_rw_done(&req->rw.kiocb, ret);
97284637 3334
b66ceaf3 3335 if (req->flags & REQ_F_REISSUE) {
97284637 3336 req->flags &= ~REQ_F_REISSUE;
b91ef187 3337 if (io_resubmit_prep(req))
773af691 3338 io_req_task_queue_reissue(req);
b91ef187
PB
3339 else
3340 io_req_task_queue_fail(req, ret);
97284637 3341 }
ba816ad6
JA
3342}
3343
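/*
 * Illustrative sketch, not part of io_uring.c: how kiocb_done() above folds
 * previously completed bytes into the final result of a retried read/write.
 * An error after partial progress still reports the bytes already done.
 * The helper name is invented for the example.
 */
static long example_fold_partial_result(long ret, long bytes_done)
{
	if (bytes_done > 0) {
		if (ret < 0)
			ret = bytes_done;	/* report prior progress instead of the error */
		else
			ret += bytes_done;
	}
	return ret;
}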
eae071c9
PB
3344static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
3345 struct io_mapped_ubuf *imu)
edafccee 3346{
9adbd45d 3347 size_t len = req->rw.len;
75769e3f 3348 u64 buf_end, buf_addr = req->rw.addr;
edafccee 3349 size_t offset;
edafccee 3350
75769e3f 3351 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
edafccee
JA
3352 return -EFAULT;
3353 /* not inside the mapped region */
4751f53d 3354 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
edafccee
JA
3355 return -EFAULT;
3356
3357 /*
3358 * May not be a start of buffer, set size appropriately
3359 * and advance us to the beginning.
3360 */
3361 offset = buf_addr - imu->ubuf;
3362 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bd11b3a3
JA
3363
3364 if (offset) {
3365 /*
3366 * Don't use iov_iter_advance() here, as it's really slow for
3367 * using the latter parts of a big fixed buffer - it iterates
3368 * over each segment manually. We can cheat a bit here, because
3369 * we know that:
3370 *
3371 * 1) it's a BVEC iter, we set it up
3372 * 2) all bvecs are PAGE_SIZE in size, except potentially the
3373 * first and last bvec
3374 *
3375 * So just find our index, and adjust the iterator afterwards.
3376 * If the offset is within the first bvec (or the whole first
 3377	 * bvec), just use iov_iter_advance(). This makes it easier
3378 * since we can just skip the first segment, which may not
3379 * be PAGE_SIZE aligned.
3380 */
3381 const struct bio_vec *bvec = imu->bvec;
3382
3383 if (offset <= bvec->bv_len) {
3384 iov_iter_advance(iter, offset);
3385 } else {
3386 unsigned long seg_skip;
3387
3388 /* skip first vec */
3389 offset -= bvec->bv_len;
3390 seg_skip = 1 + (offset >> PAGE_SHIFT);
3391
3392 iter->bvec = bvec + seg_skip;
3393 iter->nr_segs -= seg_skip;
99c79f66 3394 iter->count -= bvec->bv_len + offset;
bd11b3a3 3395 iter->iov_offset = offset & ~PAGE_MASK;
bd11b3a3
JA
3396 }
3397 }
3398
847595de 3399 return 0;
edafccee
JA
3400}
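/*
 * Editor's illustration, not part of io_uring.c (guarded out with #if 0):
 * a minimal userspace sketch of the path that lands in io_import_fixed()/
 * __io_import_fixed() above. It assumes liburing and its
 * io_uring_register_buffers()/io_uring_prep_read_fixed() helpers; names and
 * error handling are simplified.
 */
#if 0
#include <fcntl.h>
#include <stdlib.h>
#include <liburing.h>

static int read_with_fixed_buffer(const char *path)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	int fd, ret;

	fd = open(path, O_RDONLY);
	io_uring_queue_init(8, &ring, 0);

	/* pin one 64K buffer; it becomes index 0 in ctx->user_bufs */
	iov.iov_len = 65536;
	posix_memalign(&iov.iov_base, 4096, iov.iov_len);
	io_uring_register_buffers(&ring, &iov, 1);

	/* IORING_OP_READ_FIXED: buf_index 0, file offset 0 */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		ret = cqe->res;		/* bytes read, or -errno */
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return ret;
}
#endif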
3401
5106dd6e
JA
3402static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
3403 unsigned int issue_flags)
eae071c9 3404{
eae071c9
PB
3405 struct io_mapped_ubuf *imu = req->imu;
3406 u16 index, buf_index = req->buf_index;
3407
3408 if (likely(!imu)) {
578c0ee2
PB
3409 struct io_ring_ctx *ctx = req->ctx;
3410
eae071c9
PB
3411 if (unlikely(buf_index >= ctx->nr_user_bufs))
3412 return -EFAULT;
5106dd6e 3413 io_req_set_rsrc_node(req, ctx, issue_flags);
eae071c9
PB
3414 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
3415 imu = READ_ONCE(ctx->user_bufs[index]);
3416 req->imu = imu;
3417 }
3418 return __io_import_fixed(req, rw, iter, imu);
3419}
3420
dbc7d452
JA
3421static void io_buffer_add_list(struct io_ring_ctx *ctx,
3422 struct io_buffer_list *bl, unsigned int bgid)
3423{
3424 struct list_head *list;
3425
3426 list = &ctx->io_buffers[hash_32(bgid, IO_BUFFERS_HASH_BITS)];
3427 INIT_LIST_HEAD(&bl->buf_list);
3428 bl->bgid = bgid;
3429 list_add(&bl->list, list);
3430}
3431
bcda7baa 3432static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
51aac424 3433 int bgid, unsigned int issue_flags)
bcda7baa 3434{
30d51dd4 3435 struct io_buffer *kbuf = req->kbuf;
dbc7d452
JA
3436 struct io_ring_ctx *ctx = req->ctx;
3437 struct io_buffer_list *bl;
bcda7baa
JA
3438
3439 if (req->flags & REQ_F_BUFFER_SELECTED)
3440 return kbuf;
3441
f8929630 3442 io_ring_submit_lock(req->ctx, issue_flags);
bcda7baa 3443
dbc7d452
JA
3444 bl = io_buffer_get_list(ctx, bgid);
3445 if (bl && !list_empty(&bl->buf_list)) {
3446 kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
3447 list_del(&kbuf->list);
bcda7baa
JA
3448 if (*len > kbuf->len)
3449 *len = kbuf->len;
30d51dd4
PB
3450 req->flags |= REQ_F_BUFFER_SELECTED;
3451 req->kbuf = kbuf;
bcda7baa
JA
3452 } else {
3453 kbuf = ERR_PTR(-ENOBUFS);
3454 }
3455
f8929630 3456 io_ring_submit_unlock(req->ctx, issue_flags);
bcda7baa
JA
3457 return kbuf;
3458}
3459
4d954c25 3460static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
51aac424 3461 unsigned int issue_flags)
4d954c25
JA
3462{
3463 struct io_buffer *kbuf;
4f4eeba8 3464 u16 bgid;
4d954c25 3465
4f4eeba8 3466 bgid = req->buf_index;
51aac424 3467 kbuf = io_buffer_select(req, len, bgid, issue_flags);
4d954c25
JA
3468 if (IS_ERR(kbuf))
3469 return kbuf;
4d954c25
JA
3470 return u64_to_user_ptr(kbuf->addr);
3471}
3472
3473#ifdef CONFIG_COMPAT
3474static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
51aac424 3475 unsigned int issue_flags)
4d954c25
JA
3476{
3477 struct compat_iovec __user *uiov;
3478 compat_ssize_t clen;
3479 void __user *buf;
3480 ssize_t len;
3481
3482 uiov = u64_to_user_ptr(req->rw.addr);
3483 if (!access_ok(uiov, sizeof(*uiov)))
3484 return -EFAULT;
3485 if (__get_user(clen, &uiov->iov_len))
3486 return -EFAULT;
3487 if (clen < 0)
3488 return -EINVAL;
3489
3490 len = clen;
51aac424 3491 buf = io_rw_buffer_select(req, &len, issue_flags);
4d954c25
JA
3492 if (IS_ERR(buf))
3493 return PTR_ERR(buf);
3494 iov[0].iov_base = buf;
3495 iov[0].iov_len = (compat_size_t) len;
3496 return 0;
3497}
3498#endif
3499
3500static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
51aac424 3501 unsigned int issue_flags)
4d954c25
JA
3502{
3503 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3504 void __user *buf;
3505 ssize_t len;
3506
3507 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3508 return -EFAULT;
3509
3510 len = iov[0].iov_len;
3511 if (len < 0)
3512 return -EINVAL;
51aac424 3513 buf = io_rw_buffer_select(req, &len, issue_flags);
4d954c25
JA
3514 if (IS_ERR(buf))
3515 return PTR_ERR(buf);
3516 iov[0].iov_base = buf;
3517 iov[0].iov_len = len;
3518 return 0;
3519}
3520
3521static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
51aac424 3522 unsigned int issue_flags)
4d954c25 3523{
dddb3e26 3524 if (req->flags & REQ_F_BUFFER_SELECTED) {
30d51dd4 3525 struct io_buffer *kbuf = req->kbuf;
dddb3e26 3526
dddb3e26
JA
3527 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3528 iov[0].iov_len = kbuf->len;
4d954c25 3529 return 0;
dddb3e26 3530 }
dd201662 3531 if (req->rw.len != 1)
4d954c25
JA
3532 return -EINVAL;
3533
3534#ifdef CONFIG_COMPAT
3535 if (req->ctx->compat)
51aac424 3536 return io_compat_import(req, iov, issue_flags);
4d954c25
JA
3537#endif
3538
51aac424 3539 return __io_iov_buffer_select(req, iov, issue_flags);
4d954c25
JA
3540}
3541
caa8fe6e
PB
3542static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
3543 struct io_rw_state *s,
3544 unsigned int issue_flags)
2b188cc1 3545{
5e49c973 3546 struct iov_iter *iter = &s->iter;
847595de 3547 u8 opcode = req->opcode;
caa8fe6e 3548 struct iovec *iovec;
d1d681b0
PB
3549 void __user *buf;
3550 size_t sqe_len;
4d954c25 3551 ssize_t ret;
edafccee 3552
f3251183 3553 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
5106dd6e 3554 ret = io_import_fixed(req, rw, iter, issue_flags);
f3251183
PB
3555 if (ret)
3556 return ERR_PTR(ret);
3557 return NULL;
3558 }
2b188cc1 3559
bcda7baa 3560 /* buffer index only valid with fixed read/write, or buffer select */
d1d681b0 3561 if (unlikely(req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)))
caa8fe6e 3562 return ERR_PTR(-EINVAL);
9adbd45d 3563
d1d681b0
PB
3564 buf = u64_to_user_ptr(req->rw.addr);
3565 sqe_len = req->rw.len;
9adbd45d 3566
3a6820f2 3567 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
bcda7baa 3568 if (req->flags & REQ_F_BUFFER_SELECT) {
51aac424 3569 buf = io_rw_buffer_select(req, &sqe_len, issue_flags);
867a23ea 3570 if (IS_ERR(buf))
898df244 3571 return ERR_CAST(buf);
3f9d6441 3572 req->rw.len = sqe_len;
bcda7baa
JA
3573 }
3574
5e49c973 3575 ret = import_single_range(rw, buf, sqe_len, s->fast_iov, iter);
f3251183
PB
3576 if (ret)
3577 return ERR_PTR(ret);
3578 return NULL;
3a6820f2
JA
3579 }
3580
caa8fe6e 3581 iovec = s->fast_iov;
4d954c25 3582 if (req->flags & REQ_F_BUFFER_SELECT) {
caa8fe6e 3583 ret = io_iov_buffer_select(req, iovec, issue_flags);
f3251183
PB
3584 if (ret)
3585 return ERR_PTR(ret);
3586 iov_iter_init(iter, rw, iovec, 1, iovec->iov_len);
3587 return NULL;
4d954c25
JA
3588 }
3589
caa8fe6e 3590 ret = __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
89cd35c5 3591 req->ctx->compat);
caa8fe6e
PB
3592 if (unlikely(ret < 0))
3593 return ERR_PTR(ret);
3594 return iovec;
2b188cc1
JA
3595}
3596
5e49c973
PB
3597static inline int io_import_iovec(int rw, struct io_kiocb *req,
3598 struct iovec **iovec, struct io_rw_state *s,
3599 unsigned int issue_flags)
3600{
caa8fe6e
PB
3601 *iovec = __io_import_iovec(rw, req, s, issue_flags);
3602 if (unlikely(IS_ERR(*iovec)))
3603 return PTR_ERR(*iovec);
5e49c973 3604
5e49c973 3605 iov_iter_save_state(&s->iter, &s->iter_state);
caa8fe6e 3606 return 0;
2b188cc1
JA
3607}
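/*
 * Editor's illustration, not part of io_uring.c (#if 0): how a vectored
 * IORING_OP_READV submission reaches __io_import_iovec() above. With at most
 * UIO_FASTIOV segments the array is imported into s->fast_iov; larger arrays
 * are allocated and later kfree()'d. Assumes liburing's io_uring_prep_readv();
 * queue_readv() is a hypothetical wrapper.
 */
#if 0
#include <liburing.h>

static int queue_readv(struct io_uring *ring, int fd,
		       void *hdr, size_t hdr_len, void *body, size_t body_len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct iovec iov[2] = {
		{ .iov_base = hdr,  .iov_len = hdr_len  },
		{ .iov_base = body, .iov_len = body_len },
	};

	if (!sqe)
		return -EAGAIN;
	/* sqe->addr points at the iovec array, sqe->len is the segment count */
	io_uring_prep_readv(sqe, fd, iov, 2, 0);
	/* with a default (non-SQPOLL) ring, iov[] is consumed during submit */
	return io_uring_submit(ring);
}
#endif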
3608
0fef9483
JA
3609static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3610{
5b09e37e 3611 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
0fef9483
JA
3612}
3613
31b51510 3614/*
32960613
JA
3615 * For files that don't have ->read_iter() and ->write_iter(), handle them
3616 * by looping over ->read() or ->write() manually.
31b51510 3617 */
4017eb91 3618static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
32960613 3619{
4017eb91
JA
3620 struct kiocb *kiocb = &req->rw.kiocb;
3621 struct file *file = req->file;
32960613 3622 ssize_t ret = 0;
af9c45ec 3623 loff_t *ppos;
32960613
JA
3624
3625 /*
3626 * Don't support polled IO through this interface, and we can't
3627 * support non-blocking either. For the latter, this just causes
3628 * the kiocb to be handled from an async context.
3629 */
3630 if (kiocb->ki_flags & IOCB_HIPRI)
3631 return -EOPNOTSUPP;
35645ac3
PB
3632 if ((kiocb->ki_flags & IOCB_NOWAIT) &&
3633 !(kiocb->ki_filp->f_flags & O_NONBLOCK))
32960613
JA
3634 return -EAGAIN;
3635
af9c45ec
DY
3636 ppos = io_kiocb_ppos(kiocb);
3637
32960613 3638 while (iov_iter_count(iter)) {
311ae9e1 3639 struct iovec iovec;
32960613
JA
3640 ssize_t nr;
3641
311ae9e1
PB
3642 if (!iov_iter_is_bvec(iter)) {
3643 iovec = iov_iter_iovec(iter);
3644 } else {
4017eb91
JA
3645 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3646 iovec.iov_len = req->rw.len;
311ae9e1
PB
3647 }
3648
32960613
JA
3649 if (rw == READ) {
3650 nr = file->f_op->read(file, iovec.iov_base,
af9c45ec 3651 iovec.iov_len, ppos);
32960613
JA
3652 } else {
3653 nr = file->f_op->write(file, iovec.iov_base,
af9c45ec 3654 iovec.iov_len, ppos);
32960613
JA
3655 }
3656
3657 if (nr < 0) {
3658 if (!ret)
3659 ret = nr;
3660 break;
3661 }
5e929367 3662 ret += nr;
16c8d2df
JA
3663 if (!iov_iter_is_bvec(iter)) {
3664 iov_iter_advance(iter, nr);
3665 } else {
16c8d2df 3666 req->rw.addr += nr;
5e929367
JA
3667 req->rw.len -= nr;
3668 if (!req->rw.len)
3669 break;
16c8d2df 3670 }
32960613
JA
3671 if (nr != iovec.iov_len)
3672 break;
32960613
JA
3673 }
3674
3675 return ret;
3676}
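/*
 * Editor's illustration, not part of io_uring.c (#if 0): the kind of file
 * loop_rw_iter() exists for - a hypothetical driver that only implements
 * ->read(), not ->read_iter(). Reads issued on it through io_uring are
 * serviced by the loop above, one segment per ->read() call.
 */
#if 0
static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	static const char msg[] = "demo\n";

	return simple_read_from_buffer(buf, count, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations demo_fops = {
	.owner	= THIS_MODULE,
	.read	= demo_read,	/* no .read_iter: io_read() -> loop_rw_iter() */
	.llseek	= default_llseek,
};
#endif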
3677
ff6165b2
JA
3678static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3679 const struct iovec *fast_iov, struct iov_iter *iter)
f67676d1 3680{
e8c2bc1f 3681 struct io_async_rw *rw = req->async_data;
b64e3444 3682
538941e2 3683 memcpy(&rw->s.iter, iter, sizeof(*iter));
afb87658 3684 rw->free_iovec = iovec;
227c0c96 3685 rw->bytes_done = 0;
ff6165b2 3686 /* can only be fixed buffers, no need to do anything */
9c3a205c 3687 if (iov_iter_is_bvec(iter))
ff6165b2 3688 return;
b64e3444 3689 if (!iovec) {
ff6165b2
JA
3690 unsigned iov_off = 0;
3691
538941e2 3692 rw->s.iter.iov = rw->s.fast_iov;
ff6165b2
JA
3693 if (iter->iov != fast_iov) {
3694 iov_off = iter->iov - fast_iov;
538941e2 3695 rw->s.iter.iov += iov_off;
ff6165b2 3696 }
538941e2
PB
3697 if (rw->s.fast_iov != fast_iov)
3698 memcpy(rw->s.fast_iov + iov_off, fast_iov + iov_off,
45097dae 3699 sizeof(struct iovec) * iter->nr_segs);
99bc4c38
PB
3700 } else {
3701 req->flags |= REQ_F_NEED_CLEANUP;
f67676d1
JA
3702 }
3703}
3704
8d4af685 3705static inline bool io_alloc_async_data(struct io_kiocb *req)
3d9932a8 3706{
e8c2bc1f
JA
3707 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3708 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
d886e185
PB
3709 if (req->async_data) {
3710 req->flags |= REQ_F_ASYNC_DATA;
3711 return false;
3712 }
3713 return true;
3d9932a8
XW
3714}
3715
ff6165b2 3716static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
c88598a9 3717 struct io_rw_state *s, bool force)
b7bb4f7d 3718{
26f0505a 3719 if (!force && !io_op_defs[req->opcode].needs_async_setup)
74566df3 3720 return 0;
d886e185 3721 if (!req_has_async_data(req)) {
cd658695
JA
3722 struct io_async_rw *iorw;
3723
6cb78689 3724 if (io_alloc_async_data(req)) {
6bf985dc 3725 kfree(iovec);
5d204bcf 3726 return -ENOMEM;
6bf985dc 3727 }
b7bb4f7d 3728
c88598a9 3729 io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
cd658695
JA
3730 iorw = req->async_data;
3731 /* we've copied and mapped the iter, ensure state is saved */
538941e2 3732 iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
5d204bcf 3733 }
b7bb4f7d 3734 return 0;
f67676d1
JA
3735}
3736
73debe68 3737static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
c3e330a4 3738{
e8c2bc1f 3739 struct io_async_rw *iorw = req->async_data;
5e49c973 3740 struct iovec *iov;
847595de 3741 int ret;
c3e330a4 3742
51aac424 3743 /* submission path, ->uring_lock should already be taken */
3b44b371 3744 ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
c3e330a4
PB
3745 if (unlikely(ret < 0))
3746 return ret;
3747
ab0b196c
PB
3748 iorw->bytes_done = 0;
3749 iorw->free_iovec = iov;
3750 if (iov)
3751 req->flags |= REQ_F_NEED_CLEANUP;
c3e330a4
PB
3752 return 0;
3753}
3754
c1dd91d1 3755/*
ffdc8dab 3756 * This is our waitqueue callback handler, registered through __folio_lock_async()
c1dd91d1
JA
 3757 * when we initially tried to do the IO with the iocb and armed our waitqueue.
3758 * This gets called when the page is unlocked, and we generally expect that to
3759 * happen when the page IO is completed and the page is now uptodate. This will
3760 * queue a task_work based retry of the operation, attempting to copy the data
3761 * again. If the latter fails because the page was NOT uptodate, then we will
3762 * do a thread based blocking retry of the operation. That's the unexpected
3763 * slow path.
3764 */
bcf5a063
JA
3765static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3766 int sync, void *arg)
3767{
3768 struct wait_page_queue *wpq;
3769 struct io_kiocb *req = wait->private;
bcf5a063 3770 struct wait_page_key *key = arg;
bcf5a063
JA
3771
3772 wpq = container_of(wait, struct wait_page_queue, wait);
3773
cdc8fcb4
LT
3774 if (!wake_page_match(wpq, key))
3775 return 0;
3776
c8d317aa 3777 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
bcf5a063 3778 list_del_init(&wait->entry);
921b9054 3779 io_req_task_queue(req);
bcf5a063
JA
3780 return 1;
3781}
3782
c1dd91d1
JA
3783/*
3784 * This controls whether a given IO request should be armed for async page
3785 * based retry. If we return false here, the request is handed to the async
3786 * worker threads for retry. If we're doing buffered reads on a regular file,
3787 * we prepare a private wait_page_queue entry and retry the operation. This
3788 * will either succeed because the page is now uptodate and unlocked, or it
3789 * will register a callback when the page is unlocked at IO completion. Through
3790 * that callback, io_uring uses task_work to setup a retry of the operation.
3791 * That retry will attempt the buffered read again. The retry will generally
3792 * succeed, or in rare cases where it fails, we then fall back to using the
3793 * async worker threads for a blocking retry.
3794 */
227c0c96 3795static bool io_rw_should_retry(struct io_kiocb *req)
f67676d1 3796{
e8c2bc1f
JA
3797 struct io_async_rw *rw = req->async_data;
3798 struct wait_page_queue *wait = &rw->wpq;
bcf5a063 3799 struct kiocb *kiocb = &req->rw.kiocb;
f67676d1 3800
bcf5a063
JA
3801 /* never retry for NOWAIT, we just complete with -EAGAIN */
3802 if (req->flags & REQ_F_NOWAIT)
3803 return false;
f67676d1 3804
227c0c96 3805 /* Only for buffered IO */
3b2a4439 3806 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
bcf5a063 3807 return false;
3b2a4439 3808
bcf5a063
JA
3809 /*
3810 * just use poll if we can, and don't attempt if the fs doesn't
3811 * support callback based unlocks
3812 */
3813 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3814 return false;
f67676d1 3815
3b2a4439
JA
3816 wait->wait.func = io_async_buf_func;
3817 wait->wait.private = req;
3818 wait->wait.flags = 0;
3819 INIT_LIST_HEAD(&wait->wait.entry);
3820 kiocb->ki_flags |= IOCB_WAITQ;
c8d317aa 3821 kiocb->ki_flags &= ~IOCB_NOWAIT;
3b2a4439 3822 kiocb->ki_waitq = wait;
3b2a4439 3823 return true;
bcf5a063
JA
3824}
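/*
 * Editor's note, not part of io_uring.c (#if 0): io_rw_should_retry() only
 * arms the waitqueue when the file advertises FMODE_BUF_RASYNC. A filesystem
 * opts in from its ->open() method; minimal sketch of a hypothetical one.
 */
#if 0
static int demo_file_open(struct inode *inode, struct file *file)
{
	/* allow io_uring to retry buffered reads via io_async_buf_func() */
	file->f_mode |= FMODE_BUF_RASYNC;
	return generic_file_open(inode, file);
}
#endif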
3825
aeab9506 3826static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
bcf5a063 3827{
607b6fb8 3828 if (likely(req->file->f_op->read_iter))
bcf5a063 3829 return call_read_iter(req->file, &req->rw.kiocb, iter);
2dd2111d 3830 else if (req->file->f_op->read)
4017eb91 3831 return loop_rw_iter(READ, req, iter);
2dd2111d
GH
3832 else
3833 return -EINVAL;
f67676d1
JA
3834}
3835
7db30437
ML
3836static bool need_read_all(struct io_kiocb *req)
3837{
3838 return req->flags & REQ_F_ISREG ||
3839 S_ISBLK(file_inode(req->file)->i_mode);
3840}
3841
584b0180
JA
3842static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
3843{
3844 struct kiocb *kiocb = &req->rw.kiocb;
3845 struct io_ring_ctx *ctx = req->ctx;
3846 struct file *file = req->file;
3847 int ret;
3848
3849 if (unlikely(!file || !(file->f_mode & mode)))
3850 return -EBADF;
3851
3852 if (!io_req_ffs_set(req))
3853 req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
3854
3855 kiocb->ki_flags = iocb_flags(file);
3856 ret = kiocb_set_rw_flags(kiocb, req->rw.flags);
3857 if (unlikely(ret))
3858 return ret;
3859
3860 /*
3861 * If the file is marked O_NONBLOCK, still allow retry for it if it
3862 * supports async. Otherwise it's impossible to use O_NONBLOCK files
 3863 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
3864 */
3865 if ((kiocb->ki_flags & IOCB_NOWAIT) ||
3866 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
3867 req->flags |= REQ_F_NOWAIT;
3868
3869 if (ctx->flags & IORING_SETUP_IOPOLL) {
3870 if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
3871 return -EOPNOTSUPP;
3872
3873 kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
3874 kiocb->ki_complete = io_complete_rw_iopoll;
3875 req->iopoll_completed = 0;
3876 } else {
3877 if (kiocb->ki_flags & IOCB_HIPRI)
3878 return -EINVAL;
3879 kiocb->ki_complete = io_complete_rw;
3880 }
3881
3882 return 0;
3883}
3884
889fca73 3885static int io_read(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1 3886{
607b6fb8 3887 struct io_rw_state __s, *s = &__s;
c88598a9 3888 struct iovec *iovec;
9adbd45d 3889 struct kiocb *kiocb = &req->rw.kiocb;
45d189c6 3890 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
d886e185 3891 struct io_async_rw *rw;
cd658695 3892 ssize_t ret, ret2;
b4aec400 3893 loff_t *ppos;
ff6165b2 3894
607b6fb8
PB
3895 if (!req_has_async_data(req)) {
3896 ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
3897 if (unlikely(ret < 0))
3898 return ret;
3899 } else {
2be2eb02
JA
3900 /*
3901 * Safe and required to re-import if we're using provided
3902 * buffers, as we dropped the selected one before retry.
3903 */
3904 if (req->flags & REQ_F_BUFFER_SELECT) {
3905 ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
3906 if (unlikely(ret < 0))
3907 return ret;
3908 }
3909
d886e185 3910 rw = req->async_data;
c88598a9 3911 s = &rw->s;
cd658695
JA
3912 /*
3913 * We come here from an earlier attempt, restore our state to
3914 * match in case it doesn't. It's cheap enough that we don't
3915 * need to make this conditional.
3916 */
c88598a9 3917 iov_iter_restore(&s->iter, &s->iter_state);
2846c481 3918 iovec = NULL;
2846c481 3919 }
584b0180 3920 ret = io_rw_init_file(req, FMODE_READ);
323b190b
JA
3921 if (unlikely(ret)) {
3922 kfree(iovec);
584b0180 3923 return ret;
323b190b 3924 }
cef216fc 3925 req->cqe.res = iov_iter_count(&s->iter);
2b188cc1 3926
607b6fb8
PB
3927 if (force_nonblock) {
3928 /* If the file doesn't support async, just async punt */
35645ac3 3929 if (unlikely(!io_file_supports_nowait(req))) {
607b6fb8
PB
3930 ret = io_setup_async_rw(req, iovec, s, true);
3931 return ret ?: -EAGAIN;
3932 }
a88fc400 3933 kiocb->ki_flags |= IOCB_NOWAIT;
607b6fb8
PB
3934 } else {
3935 /* Ensure we clear previously set non-block flag */
3936 kiocb->ki_flags &= ~IOCB_NOWAIT;
6713e7a6 3937 }
9e645e11 3938
b4aec400 3939 ppos = io_kiocb_update_pos(req);
d34e1e5b 3940
cef216fc 3941 ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
5ea5dd45
PB
3942 if (unlikely(ret)) {
3943 kfree(iovec);
3944 return ret;
3945 }
2b188cc1 3946
c88598a9 3947 ret = io_iter_do_read(req, &s->iter);
32960613 3948
230d50d4 3949 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
6ad7f233 3950 req->flags &= ~REQ_F_REISSUE;
9af177ee
JA
3951 /* if we can poll, just do that */
3952 if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
3953 return -EAGAIN;
eefdf30f
JA
3954 /* IOPOLL retry should happen for io-wq threads */
3955 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
f91daf56 3956 goto done;
75c668cd
PB
3957 /* no retry on NONBLOCK nor RWF_NOWAIT */
3958 if (req->flags & REQ_F_NOWAIT)
355afaeb 3959 goto done;
f38c7e3a 3960 ret = 0;
230d50d4
JA
3961 } else if (ret == -EIOCBQUEUED) {
3962 goto out_free;
cef216fc 3963 } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
7db30437 3964 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
7335e3bf 3965 /* read all, failed, already did sync or don't want to retry */
00d23d51 3966 goto done;
227c0c96
JA
3967 }
3968
cd658695
JA
3969 /*
3970 * Don't depend on the iter state matching what was consumed, or being
3971 * untouched in case of error. Restore it and we'll advance it
3972 * manually if we need to.
3973 */
c88598a9 3974 iov_iter_restore(&s->iter, &s->iter_state);
cd658695 3975
c88598a9 3976 ret2 = io_setup_async_rw(req, iovec, s, true);
6bf985dc
PB
3977 if (ret2)
3978 return ret2;
3979
fe1cdd55 3980 iovec = NULL;
e8c2bc1f 3981 rw = req->async_data;
c88598a9 3982 s = &rw->s;
cd658695
JA
3983 /*
3984 * Now use our persistent iterator and state, if we aren't already.
3985 * We've restored and mapped the iter to match.
3986 */
227c0c96 3987
b23df91b 3988 do {
cd658695
JA
3989 /*
3990 * We end up here because of a partial read, either from
3991 * above or inside this loop. Advance the iter by the bytes
3992 * that were consumed.
3993 */
c88598a9
PB
3994 iov_iter_advance(&s->iter, ret);
3995 if (!iov_iter_count(&s->iter))
cd658695 3996 break;
b23df91b 3997 rw->bytes_done += ret;
c88598a9 3998 iov_iter_save_state(&s->iter, &s->iter_state);
cd658695 3999
b23df91b
PB
4000 /* if we can retry, do so with the callbacks armed */
4001 if (!io_rw_should_retry(req)) {
4002 kiocb->ki_flags &= ~IOCB_WAITQ;
4003 return -EAGAIN;
4004 }
4005
4006 /*
4007 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
4008 * we get -EIOCBQUEUED, then we'll get a notification when the
4009 * desired page gets unlocked. We can also get a partial read
4010 * here, and if we do, then just retry at the new offset.
4011 */
c88598a9 4012 ret = io_iter_do_read(req, &s->iter);
b23df91b
PB
4013 if (ret == -EIOCBQUEUED)
4014 return 0;
227c0c96 4015 /* we got some bytes, but not all. retry. */
b5b0ecb7 4016 kiocb->ki_flags &= ~IOCB_WAITQ;
c88598a9 4017 iov_iter_restore(&s->iter, &s->iter_state);
cd658695 4018 } while (ret > 0);
227c0c96 4019done:
2ea537ca 4020 kiocb_done(req, ret, issue_flags);
fe1cdd55
PB
4021out_free:
 4022 /* it's faster to check here than to delegate to kfree() */
4023 if (iovec)
4024 kfree(iovec);
5ea5dd45 4025 return 0;
2b188cc1
JA
4026}
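/*
 * Editor's illustration, not part of io_uring.c (#if 0): the userspace view
 * of io_read(). The -EAGAIN punting, iter restore and buffered-retry loop
 * above are invisible to the submitter, which only sees cqe->res. Assumes
 * liburing; read_once() is a hypothetical wrapper.
 */
#if 0
#include <liburing.h>

static int read_once(struct io_uring *ring, int fd, void *buf, unsigned int len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	io_uring_prep_read(sqe, fd, buf, len, 0);	/* IORING_OP_READ, offset 0 */
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;		/* bytes read, or -errno (e.g. -EINTR from io_rw_done()) */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}
#endif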
4027
889fca73 4028static int io_write(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1 4029{
607b6fb8 4030 struct io_rw_state __s, *s = &__s;
c88598a9 4031 struct iovec *iovec;
9adbd45d 4032 struct kiocb *kiocb = &req->rw.kiocb;
45d189c6 4033 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
cd658695 4034 ssize_t ret, ret2;
b4aec400 4035 loff_t *ppos;
2b188cc1 4036
607b6fb8 4037 if (!req_has_async_data(req)) {
5e49c973
PB
4038 ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
4039 if (unlikely(ret < 0))
2846c481 4040 return ret;
607b6fb8
PB
4041 } else {
4042 struct io_async_rw *rw = req->async_data;
4043
4044 s = &rw->s;
4045 iov_iter_restore(&s->iter, &s->iter_state);
2846c481 4046 iovec = NULL;
2846c481 4047 }
584b0180 4048 ret = io_rw_init_file(req, FMODE_WRITE);
323b190b
JA
4049 if (unlikely(ret)) {
4050 kfree(iovec);
584b0180 4051 return ret;
323b190b 4052 }
cef216fc 4053 req->cqe.res = iov_iter_count(&s->iter);
2b188cc1 4054
607b6fb8
PB
4055 if (force_nonblock) {
4056 /* If the file doesn't support async, just async punt */
35645ac3 4057 if (unlikely(!io_file_supports_nowait(req)))
607b6fb8 4058 goto copy_iov;
fd6c2e4c 4059
607b6fb8
PB
4060 /* file path doesn't support NOWAIT for non-direct_IO */
4061 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
4062 (req->flags & REQ_F_ISREG))
4063 goto copy_iov;
31b51510 4064
607b6fb8
PB
4065 kiocb->ki_flags |= IOCB_NOWAIT;
4066 } else {
4067 /* Ensure we clear previously set non-block flag */
4068 kiocb->ki_flags &= ~IOCB_NOWAIT;
4069 }
31b51510 4070
b4aec400 4071 ppos = io_kiocb_update_pos(req);
d34e1e5b 4072
cef216fc 4073 ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
fa15bafb
PB
4074 if (unlikely(ret))
4075 goto out_free;
4ed734b0 4076
fa15bafb
PB
4077 /*
4078 * Open-code file_start_write here to grab freeze protection,
4079 * which will be released by another thread in
4080 * io_complete_rw(). Fool lockdep by telling it the lock got
4081 * released so that it doesn't complain about the held lock when
4082 * we return to userspace.
4083 */
4084 if (req->flags & REQ_F_ISREG) {
8a3c84b6 4085 sb_start_write(file_inode(req->file)->i_sb);
fa15bafb
PB
4086 __sb_writers_release(file_inode(req->file)->i_sb,
4087 SB_FREEZE_WRITE);
4088 }
4089 kiocb->ki_flags |= IOCB_WRITE;
4ed734b0 4090
35645ac3 4091 if (likely(req->file->f_op->write_iter))
c88598a9 4092 ret2 = call_write_iter(req->file, kiocb, &s->iter);
2dd2111d 4093 else if (req->file->f_op->write)
c88598a9 4094 ret2 = loop_rw_iter(WRITE, req, &s->iter);
2dd2111d
GH
4095 else
4096 ret2 = -EINVAL;
4ed734b0 4097
6ad7f233
PB
4098 if (req->flags & REQ_F_REISSUE) {
4099 req->flags &= ~REQ_F_REISSUE;
230d50d4 4100 ret2 = -EAGAIN;
6ad7f233 4101 }
230d50d4 4102
fa15bafb
PB
4103 /*
4104 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
4105 * retry them without IOCB_NOWAIT.
4106 */
4107 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
4108 ret2 = -EAGAIN;
75c668cd
PB
4109 /* no retry on NONBLOCK nor RWF_NOWAIT */
4110 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
355afaeb 4111 goto done;
fa15bafb 4112 if (!force_nonblock || ret2 != -EAGAIN) {
eefdf30f 4113 /* IOPOLL retry should happen for io-wq threads */
b10841c9 4114 if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
eefdf30f 4115 goto copy_iov;
355afaeb 4116done:
2ea537ca 4117 kiocb_done(req, ret2, issue_flags);
fa15bafb 4118 } else {
f67676d1 4119copy_iov:
c88598a9
PB
4120 iov_iter_restore(&s->iter, &s->iter_state);
4121 ret = io_setup_async_rw(req, iovec, s, false);
6bf985dc 4122 return ret ?: -EAGAIN;
2b188cc1 4123 }
31b51510 4124out_free:
f261c168 4125 /* it's reportedly faster than delegating the null check to kfree() */
252917c3 4126 if (iovec)
6f2cc166 4127 kfree(iovec);
2b188cc1
JA
4128 return ret;
4129}
4130
80a261fd
JA
4131static int io_renameat_prep(struct io_kiocb *req,
4132 const struct io_uring_sqe *sqe)
4133{
4134 struct io_rename *ren = &req->rename;
4135 const char __user *oldf, *newf;
4136
ed7eb259
JA
4137 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4138 return -EINVAL;
26578cda 4139 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
ed7eb259 4140 return -EINVAL;
80a261fd
JA
4141 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4142 return -EBADF;
4143
4144 ren->old_dfd = READ_ONCE(sqe->fd);
4145 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
4146 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4147 ren->new_dfd = READ_ONCE(sqe->len);
4148 ren->flags = READ_ONCE(sqe->rename_flags);
4149
4150 ren->oldpath = getname(oldf);
4151 if (IS_ERR(ren->oldpath))
4152 return PTR_ERR(ren->oldpath);
4153
4154 ren->newpath = getname(newf);
4155 if (IS_ERR(ren->newpath)) {
4156 putname(ren->oldpath);
4157 return PTR_ERR(ren->newpath);
4158 }
4159
4160 req->flags |= REQ_F_NEED_CLEANUP;
4161 return 0;
4162}
4163
45d189c6 4164static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
80a261fd
JA
4165{
4166 struct io_rename *ren = &req->rename;
4167 int ret;
4168
45d189c6 4169 if (issue_flags & IO_URING_F_NONBLOCK)
80a261fd
JA
4170 return -EAGAIN;
4171
4172 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
4173 ren->newpath, ren->flags);
4174
4175 req->flags &= ~REQ_F_NEED_CLEANUP;
4176 if (ret < 0)
93d2bcd2 4177 req_set_fail(req);
80a261fd
JA
4178 io_req_complete(req, ret);
4179 return 0;
4180}
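/*
 * Editor's illustration, not part of io_uring.c (#if 0): a userspace sketch
 * that reaches io_renameat_prep()/io_renameat() above. Assumes liburing's
 * io_uring_prep_renameat() helper; queue_rename() is hypothetical.
 */
#if 0
#include <fcntl.h>		/* AT_FDCWD */
#include <liburing.h>

static int queue_rename(struct io_uring *ring, const char *oldname,
			const char *newname)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EAGAIN;
	/* maps onto sqe->fd/addr/len/addr2/rename_flags read in prep above */
	io_uring_prep_renameat(sqe, AT_FDCWD, oldname, AT_FDCWD, newname, 0);
	return io_uring_submit(ring);
}
#endif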
4181
14a1143b
JA
4182static int io_unlinkat_prep(struct io_kiocb *req,
4183 const struct io_uring_sqe *sqe)
4184{
4185 struct io_unlink *un = &req->unlink;
4186 const char __user *fname;
4187
22634bc5
JA
4188 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4189 return -EINVAL;
26578cda
PB
4190 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
4191 sqe->splice_fd_in)
22634bc5 4192 return -EINVAL;
14a1143b
JA
4193 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4194 return -EBADF;
4195
4196 un->dfd = READ_ONCE(sqe->fd);
4197
4198 un->flags = READ_ONCE(sqe->unlink_flags);
4199 if (un->flags & ~AT_REMOVEDIR)
4200 return -EINVAL;
4201
4202 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
4203 un->filename = getname(fname);
4204 if (IS_ERR(un->filename))
4205 return PTR_ERR(un->filename);
4206
4207 req->flags |= REQ_F_NEED_CLEANUP;
4208 return 0;
4209}
4210
45d189c6 4211static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
14a1143b
JA
4212{
4213 struct io_unlink *un = &req->unlink;
4214 int ret;
4215
45d189c6 4216 if (issue_flags & IO_URING_F_NONBLOCK)
14a1143b
JA
4217 return -EAGAIN;
4218
4219 if (un->flags & AT_REMOVEDIR)
4220 ret = do_rmdir(un->dfd, un->filename);
4221 else
4222 ret = do_unlinkat(un->dfd, un->filename);
4223
4224 req->flags &= ~REQ_F_NEED_CLEANUP;
4225 if (ret < 0)
93d2bcd2 4226 req_set_fail(req);
14a1143b
JA
4227 io_req_complete(req, ret);
4228 return 0;
4229}
4230
e34a02dc
DK
4231static int io_mkdirat_prep(struct io_kiocb *req,
4232 const struct io_uring_sqe *sqe)
4233{
4234 struct io_mkdir *mkd = &req->mkdir;
4235 const char __user *fname;
4236
4237 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4238 return -EINVAL;
4239 if (sqe->ioprio || sqe->off || sqe->rw_flags || sqe->buf_index ||
4240 sqe->splice_fd_in)
4241 return -EINVAL;
4242 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4243 return -EBADF;
4244
4245 mkd->dfd = READ_ONCE(sqe->fd);
4246 mkd->mode = READ_ONCE(sqe->len);
4247
4248 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
4249 mkd->filename = getname(fname);
4250 if (IS_ERR(mkd->filename))
4251 return PTR_ERR(mkd->filename);
4252
4253 req->flags |= REQ_F_NEED_CLEANUP;
4254 return 0;
4255}
4256
04f34081 4257static int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
e34a02dc
DK
4258{
4259 struct io_mkdir *mkd = &req->mkdir;
4260 int ret;
4261
4262 if (issue_flags & IO_URING_F_NONBLOCK)
4263 return -EAGAIN;
4264
4265 ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
4266
4267 req->flags &= ~REQ_F_NEED_CLEANUP;
4268 if (ret < 0)
4269 req_set_fail(req);
4270 io_req_complete(req, ret);
4271 return 0;
4272}
4273
7a8721f8
DK
4274static int io_symlinkat_prep(struct io_kiocb *req,
4275 const struct io_uring_sqe *sqe)
4276{
4277 struct io_symlink *sl = &req->symlink;
4278 const char __user *oldpath, *newpath;
4279
4280 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4281 return -EINVAL;
4282 if (sqe->ioprio || sqe->len || sqe->rw_flags || sqe->buf_index ||
4283 sqe->splice_fd_in)
4284 return -EINVAL;
4285 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4286 return -EBADF;
4287
4288 sl->new_dfd = READ_ONCE(sqe->fd);
4289 oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
4290 newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4291
4292 sl->oldpath = getname(oldpath);
4293 if (IS_ERR(sl->oldpath))
4294 return PTR_ERR(sl->oldpath);
4295
4296 sl->newpath = getname(newpath);
4297 if (IS_ERR(sl->newpath)) {
4298 putname(sl->oldpath);
4299 return PTR_ERR(sl->newpath);
4300 }
4301
4302 req->flags |= REQ_F_NEED_CLEANUP;
4303 return 0;
4304}
4305
04f34081 4306static int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
7a8721f8
DK
4307{
4308 struct io_symlink *sl = &req->symlink;
4309 int ret;
4310
4311 if (issue_flags & IO_URING_F_NONBLOCK)
4312 return -EAGAIN;
4313
4314 ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
4315
4316 req->flags &= ~REQ_F_NEED_CLEANUP;
4317 if (ret < 0)
4318 req_set_fail(req);
4319 io_req_complete(req, ret);
4320 return 0;
4321}
4322
cf30da90
DK
4323static int io_linkat_prep(struct io_kiocb *req,
4324 const struct io_uring_sqe *sqe)
4325{
4326 struct io_hardlink *lnk = &req->hardlink;
4327 const char __user *oldf, *newf;
4328
4329 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4330 return -EINVAL;
4331 if (sqe->ioprio || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
4332 return -EINVAL;
4333 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4334 return -EBADF;
4335
4336 lnk->old_dfd = READ_ONCE(sqe->fd);
4337 lnk->new_dfd = READ_ONCE(sqe->len);
4338 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
4339 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4340 lnk->flags = READ_ONCE(sqe->hardlink_flags);
4341
4342 lnk->oldpath = getname(oldf);
4343 if (IS_ERR(lnk->oldpath))
4344 return PTR_ERR(lnk->oldpath);
4345
4346 lnk->newpath = getname(newf);
4347 if (IS_ERR(lnk->newpath)) {
4348 putname(lnk->oldpath);
4349 return PTR_ERR(lnk->newpath);
4350 }
4351
4352 req->flags |= REQ_F_NEED_CLEANUP;
4353 return 0;
4354}
4355
04f34081 4356static int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
cf30da90
DK
4357{
4358 struct io_hardlink *lnk = &req->hardlink;
4359 int ret;
4360
4361 if (issue_flags & IO_URING_F_NONBLOCK)
4362 return -EAGAIN;
4363
4364 ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
4365 lnk->newpath, lnk->flags);
4366
4367 req->flags &= ~REQ_F_NEED_CLEANUP;
4368 if (ret < 0)
4369 req_set_fail(req);
4370 io_req_complete(req, ret);
4371 return 0;
4372}
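/*
 * Editor's illustration, not part of io_uring.c (#if 0): the mkdirat,
 * symlinkat and linkat handlers above all follow the same prep/punt pattern.
 * One linked submission exercising all three; assumes liburing's
 * io_uring_prep_mkdirat()/_symlinkat()/_linkat() helpers, and the path
 * names are made up.
 */
#if 0
#include <fcntl.h>
#include <liburing.h>

static int queue_fs_ops(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_mkdirat(sqe, AT_FDCWD, "dir", 0755);
	io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);	/* order before the symlink */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_symlinkat(sqe, "target", AT_FDCWD, "dir/link");
	io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);	/* order before the hardlink */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_linkat(sqe, AT_FDCWD, "dir/link", AT_FDCWD, "dir/hardlink", 0);

	return io_uring_submit(ring);			/* three CQEs to reap */
}
#endif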
4373
36f4fa68
JA
4374static int io_shutdown_prep(struct io_kiocb *req,
4375 const struct io_uring_sqe *sqe)
4376{
4377#if defined(CONFIG_NET)
4378 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4379 return -EINVAL;
26578cda
PB
4380 if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
4381 sqe->buf_index || sqe->splice_fd_in))
36f4fa68
JA
4382 return -EINVAL;
4383
4384 req->shutdown.how = READ_ONCE(sqe->len);
4385 return 0;
4386#else
4387 return -EOPNOTSUPP;
4388#endif
4389}
4390
45d189c6 4391static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
36f4fa68
JA
4392{
4393#if defined(CONFIG_NET)
4394 struct socket *sock;
4395 int ret;
4396
45d189c6 4397 if (issue_flags & IO_URING_F_NONBLOCK)
36f4fa68
JA
4398 return -EAGAIN;
4399
48aba79b 4400 sock = sock_from_file(req->file);
36f4fa68 4401 if (unlikely(!sock))
48aba79b 4402 return -ENOTSOCK;
36f4fa68
JA
4403
4404 ret = __sys_shutdown_sock(sock, req->shutdown.how);
a146468d 4405 if (ret < 0)
93d2bcd2 4406 req_set_fail(req);
36f4fa68
JA
4407 io_req_complete(req, ret);
4408 return 0;
4409#else
4410 return -EOPNOTSUPP;
4411#endif
4412}
4413
f2a8d5c7
PB
4414static int __io_splice_prep(struct io_kiocb *req,
4415 const struct io_uring_sqe *sqe)
7d67af2c 4416{
fe7e3257 4417 struct io_splice *sp = &req->splice;
7d67af2c 4418 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
7d67af2c 4419
3232dd02
PB
4420 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4421 return -EINVAL;
7d67af2c 4422
7d67af2c
PB
4423 sp->len = READ_ONCE(sqe->len);
4424 sp->flags = READ_ONCE(sqe->splice_flags);
7d67af2c
PB
4425 if (unlikely(sp->flags & ~valid_flags))
4426 return -EINVAL;
a3e4bc23 4427 sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
7d67af2c
PB
4428 return 0;
4429}
4430
f2a8d5c7
PB
4431static int io_tee_prep(struct io_kiocb *req,
4432 const struct io_uring_sqe *sqe)
4433{
4434 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
4435 return -EINVAL;
4436 return __io_splice_prep(req, sqe);
4437}
4438
45d189c6 4439static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
f2a8d5c7
PB
4440{
4441 struct io_splice *sp = &req->splice;
f2a8d5c7
PB
4442 struct file *out = sp->file_out;
4443 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
a3e4bc23 4444 struct file *in;
f2a8d5c7
PB
4445 long ret = 0;
4446
45d189c6 4447 if (issue_flags & IO_URING_F_NONBLOCK)
f2a8d5c7 4448 return -EAGAIN;
a3e4bc23 4449
5106dd6e 4450 if (sp->flags & SPLICE_F_FD_IN_FIXED)
e9419766 4451 in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
5106dd6e
JA
4452 else
4453 in = io_file_get_normal(req, sp->splice_fd_in);
a3e4bc23
JA
4454 if (!in) {
4455 ret = -EBADF;
4456 goto done;
4457 }
4458
f2a8d5c7
PB
4459 if (sp->len)
4460 ret = do_tee(in, out, sp->len, flags);
4461
e1d767f0
PB
4462 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
4463 io_put_file(in);
a3e4bc23 4464done:
f2a8d5c7 4465 if (ret != sp->len)
93d2bcd2 4466 req_set_fail(req);
e1e16097 4467 io_req_complete(req, ret);
f2a8d5c7
PB
4468 return 0;
4469}
4470
4471static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4472{
fe7e3257 4473 struct io_splice *sp = &req->splice;
f2a8d5c7
PB
4474
4475 sp->off_in = READ_ONCE(sqe->splice_off_in);
4476 sp->off_out = READ_ONCE(sqe->off);
4477 return __io_splice_prep(req, sqe);
4478}
4479
45d189c6 4480static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
7d67af2c
PB
4481{
4482 struct io_splice *sp = &req->splice;
7d67af2c
PB
4483 struct file *out = sp->file_out;
4484 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
4485 loff_t *poff_in, *poff_out;
a3e4bc23 4486 struct file *in;
c9687426 4487 long ret = 0;
7d67af2c 4488
45d189c6 4489 if (issue_flags & IO_URING_F_NONBLOCK)
2fb3e822 4490 return -EAGAIN;
7d67af2c 4491
5106dd6e 4492 if (sp->flags & SPLICE_F_FD_IN_FIXED)
e9419766 4493 in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
5106dd6e
JA
4494 else
4495 in = io_file_get_normal(req, sp->splice_fd_in);
a3e4bc23
JA
4496 if (!in) {
4497 ret = -EBADF;
4498 goto done;
4499 }
4500
7d67af2c
PB
4501 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
4502 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
c9687426 4503
948a7749 4504 if (sp->len)
c9687426 4505 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
7d67af2c 4506
e1d767f0
PB
4507 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
4508 io_put_file(in);
a3e4bc23 4509done:
7d67af2c 4510 if (ret != sp->len)
93d2bcd2 4511 req_set_fail(req);
e1e16097 4512 io_req_complete(req, ret);
7d67af2c
PB
4513 return 0;
4514}
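/*
 * Editor's illustration, not part of io_uring.c (#if 0): a userspace sketch
 * reaching io_splice() above, moving bytes from a pipe into a file.
 * off_in == -1 matches the poff_in == NULL case (the pipe has no offset).
 * Assumes liburing's io_uring_prep_splice(); queue_splice() is hypothetical.
 */
#if 0
#include <liburing.h>

static int queue_splice(struct io_uring *ring, int pipe_rd, int out_fd,
			unsigned int len, int64_t file_off)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EAGAIN;
	io_uring_prep_splice(sqe, pipe_rd, -1, out_fd, file_off, len, 0);
	return io_uring_submit(ring);
}
#endif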
4515
2b188cc1
JA
4516/*
4517 * IORING_OP_NOP just posts a completion event, nothing else.
4518 */
889fca73 4519static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1
JA
4520{
4521 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 4522
def596e9
JA
4523 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4524 return -EINVAL;
4525
889fca73 4526 __io_req_complete(req, issue_flags, 0, 0);
2b188cc1
JA
4527 return 0;
4528}
4529
4f57f06c
JA
4530static int io_msg_ring_prep(struct io_kiocb *req,
4531 const struct io_uring_sqe *sqe)
4532{
f3b6a41e
JA
4533 if (unlikely(sqe->addr || sqe->ioprio || sqe->rw_flags ||
4534 sqe->splice_fd_in || sqe->buf_index || sqe->personality))
4f57f06c
JA
4535 return -EINVAL;
4536
4f57f06c
JA
4537 req->msg.user_data = READ_ONCE(sqe->off);
4538 req->msg.len = READ_ONCE(sqe->len);
4539 return 0;
4540}
4541
4542static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
4543{
4544 struct io_ring_ctx *target_ctx;
4545 struct io_msg *msg = &req->msg;
4f57f06c 4546 bool filled;
3f1d52ab 4547 int ret;
4f57f06c 4548
3f1d52ab
JA
4549 ret = -EBADFD;
4550 if (req->file->f_op != &io_uring_fops)
4551 goto done;
4f57f06c 4552
3f1d52ab 4553 ret = -EOVERFLOW;
4f57f06c
JA
4554 target_ctx = req->file->private_data;
4555
4556 spin_lock(&target_ctx->completion_lock);
7ef66d18 4557 filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, 0);
4f57f06c
JA
4558 io_commit_cqring(target_ctx);
4559 spin_unlock(&target_ctx->completion_lock);
4560
4561 if (filled) {
4562 io_cqring_ev_posted(target_ctx);
4563 ret = 0;
4564 }
4565
3f1d52ab 4566done:
9666d420
JA
4567 if (ret < 0)
4568 req_set_fail(req);
4f57f06c
JA
4569 __io_req_complete(req, issue_flags, ret, 0);
4570 return 0;
4571}
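/*
 * Editor's illustration, not part of io_uring.c (#if 0): IORING_OP_MSG_RING
 * posts a CQE into another ring, as implemented in io_msg_ring() above.
 * Assumes liburing's io_uring_prep_msg_ring() helper (liburing 2.2+);
 * ping_other_ring() is hypothetical.
 */
#if 0
#include <liburing.h>

static int ping_other_ring(struct io_uring *ring, int target_ring_fd,
			   unsigned int len, __u64 tag)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EAGAIN;
	/* the target ring sees a CQE with user_data == tag and res == len */
	io_uring_prep_msg_ring(sqe, target_ring_fd, len, tag, 0);
	return io_uring_submit(ring);
}
#endif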
4572
1155c76a 4573static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
c992fe29 4574{
6b06314c 4575 struct io_ring_ctx *ctx = req->ctx;
c992fe29 4576
6b06314c 4577 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
def596e9 4578 return -EINVAL;
26578cda
PB
4579 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
4580 sqe->splice_fd_in))
c992fe29
CH
4581 return -EINVAL;
4582
8ed8d3c3
JA
4583 req->sync.flags = READ_ONCE(sqe->fsync_flags);
4584 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
4585 return -EINVAL;
4586
4587 req->sync.off = READ_ONCE(sqe->off);
4588 req->sync.len = READ_ONCE(sqe->len);
c992fe29
CH
4589 return 0;
4590}
4591
45d189c6 4592static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 4593{
8ed8d3c3 4594 loff_t end = req->sync.off + req->sync.len;
8ed8d3c3
JA
4595 int ret;
4596
ac45abc0 4597 /* fsync always requires a blocking context */
45d189c6 4598 if (issue_flags & IO_URING_F_NONBLOCK)
ac45abc0
PB
4599 return -EAGAIN;
4600
9adbd45d 4601 ret = vfs_fsync_range(req->file, req->sync.off,
8ed8d3c3
JA
4602 end > 0 ? end : LLONG_MAX,
4603 req->sync.flags & IORING_FSYNC_DATASYNC);
4604 if (ret < 0)
93d2bcd2 4605 req_set_fail(req);
e1e16097 4606 io_req_complete(req, ret);
c992fe29
CH
4607 return 0;
4608}
4609
d63d1b5e
JA
4610static int io_fallocate_prep(struct io_kiocb *req,
4611 const struct io_uring_sqe *sqe)
4612{
26578cda
PB
4613 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
4614 sqe->splice_fd_in)
d63d1b5e 4615 return -EINVAL;
3232dd02
PB
4616 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4617 return -EINVAL;
d63d1b5e
JA
4618
4619 req->sync.off = READ_ONCE(sqe->off);
4620 req->sync.len = READ_ONCE(sqe->addr);
4621 req->sync.mode = READ_ONCE(sqe->len);
4622 return 0;
4623}
4624
45d189c6 4625static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
5d17b4a4 4626{
ac45abc0
PB
4627 int ret;
4628
d63d1b5e 4629 /* fallocate always requiring blocking context */
45d189c6 4630 if (issue_flags & IO_URING_F_NONBLOCK)
5d17b4a4 4631 return -EAGAIN;
ac45abc0
PB
4632 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
4633 req->sync.len);
ac45abc0 4634 if (ret < 0)
93d2bcd2 4635 req_set_fail(req);
f63cf519
JA
4636 else
4637 fsnotify_modify(req->file);
e1e16097 4638 io_req_complete(req, ret);
5d17b4a4
JA
4639 return 0;
4640}
4641
ec65fea5 4642static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
b7bb4f7d 4643{
f8748881 4644 const char __user *fname;
15b71abe 4645 int ret;
b7bb4f7d 4646
d3fddf6d
PB
4647 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4648 return -EINVAL;
b9445598 4649 if (unlikely(sqe->ioprio || sqe->buf_index))
15b71abe 4650 return -EINVAL;
ec65fea5 4651 if (unlikely(req->flags & REQ_F_FIXED_FILE))
cf3040ca 4652 return -EBADF;
03b1230c 4653
ec65fea5
PB
 4654 /* open.how should already be initialised */
4655 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
08a1d26e 4656 req->open.how.flags |= O_LARGEFILE;
3529d8c2 4657
25e72d10
PB
4658 req->open.dfd = READ_ONCE(sqe->fd);
4659 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
f8748881 4660 req->open.filename = getname(fname);
15b71abe
JA
4661 if (IS_ERR(req->open.filename)) {
4662 ret = PTR_ERR(req->open.filename);
4663 req->open.filename = NULL;
4664 return ret;
4665 }
b9445598
PB
4666
4667 req->open.file_slot = READ_ONCE(sqe->file_index);
4668 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC))
4669 return -EINVAL;
4670
4022e7af 4671 req->open.nofile = rlimit(RLIMIT_NOFILE);
8fef80bf 4672 req->flags |= REQ_F_NEED_CLEANUP;
15b71abe 4673 return 0;
03b1230c
JA
4674}
4675
ec65fea5
PB
4676static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4677{
d3fddf6d
PB
4678 u64 mode = READ_ONCE(sqe->len);
4679 u64 flags = READ_ONCE(sqe->open_flags);
ec65fea5 4680
ec65fea5
PB
4681 req->open.how = build_open_how(flags, mode);
4682 return __io_openat_prep(req, sqe);
4683}
4684
cebdb986 4685static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
aa1fa28f 4686{
cebdb986 4687 struct open_how __user *how;
cebdb986 4688 size_t len;
0fa03c62
JA
4689 int ret;
4690
cebdb986
JA
4691 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4692 len = READ_ONCE(sqe->len);
cebdb986
JA
4693 if (len < OPEN_HOW_SIZE_VER0)
4694 return -EINVAL;
3529d8c2 4695
cebdb986
JA
4696 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
4697 len);
4698 if (ret)
4699 return ret;
3529d8c2 4700
ec65fea5 4701 return __io_openat_prep(req, sqe);
cebdb986
JA
4702}
4703
45d189c6 4704static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
15b71abe
JA
4705{
4706 struct open_flags op;
15b71abe 4707 struct file *file;
b9445598
PB
4708 bool resolve_nonblock, nonblock_set;
4709 bool fixed = !!req->open.file_slot;
15b71abe
JA
4710 int ret;
4711
cebdb986 4712 ret = build_open_flags(&req->open.how, &op);
15b71abe
JA
4713 if (ret)
4714 goto err;
3a81fd02
JA
4715 nonblock_set = op.open_flag & O_NONBLOCK;
4716 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
45d189c6 4717 if (issue_flags & IO_URING_F_NONBLOCK) {
3a81fd02
JA
4718 /*
4719 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
 4720 * it'll always return -EAGAIN
4721 */
4722 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
4723 return -EAGAIN;
4724 op.lookup_flags |= LOOKUP_CACHED;
4725 op.open_flag |= O_NONBLOCK;
4726 }
15b71abe 4727
b9445598
PB
4728 if (!fixed) {
4729 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
4730 if (ret < 0)
4731 goto err;
4732 }
15b71abe
JA
4733
4734 file = do_filp_open(req->open.dfd, req->open.filename, &op);
12dcb58a 4735 if (IS_ERR(file)) {
944d1444 4736 /*
12dcb58a
PB
 4737 * We could hang on to this 'fd' on retrying, but it seems like a
4738 * marginal gain for something that is now known to be a slower
4739 * path. So just put it, and we'll get a new one when we retry.
944d1444 4740 */
b9445598
PB
4741 if (!fixed)
4742 put_unused_fd(ret);
3a81fd02 4743
15b71abe 4744 ret = PTR_ERR(file);
12dcb58a
PB
4745 /* only retry if RESOLVE_CACHED wasn't already set by application */
4746 if (ret == -EAGAIN &&
4747 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
4748 return -EAGAIN;
4749 goto err;
15b71abe 4750 }
12dcb58a
PB
4751
4752 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
4753 file->f_flags &= ~O_NONBLOCK;
4754 fsnotify_open(file);
b9445598
PB
4755
4756 if (!fixed)
4757 fd_install(ret, file);
4758 else
4759 ret = io_install_fixed_file(req, file, issue_flags,
4760 req->open.file_slot - 1);
15b71abe
JA
4761err:
4762 putname(req->open.filename);
8fef80bf 4763 req->flags &= ~REQ_F_NEED_CLEANUP;
15b71abe 4764 if (ret < 0)
93d2bcd2 4765 req_set_fail(req);
0bdf3398 4766 __io_req_complete(req, issue_flags, ret, 0);
15b71abe
JA
4767 return 0;
4768}
4769
45d189c6 4770static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
cebdb986 4771{
e45cff58 4772 return io_openat2(req, issue_flags);
cebdb986
JA
4773}
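/*
 * Editor's illustration, not part of io_uring.c (#if 0): opening a file via
 * IORING_OP_OPENAT, which runs through io_openat_prep()/io_openat2() above;
 * the new descriptor (or -errno) comes back in cqe->res. Assumes liburing's
 * io_uring_prep_openat(); open_via_ring() is hypothetical.
 */
#if 0
#include <fcntl.h>
#include <liburing.h>

static int open_via_ring(struct io_uring *ring, const char *path)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int fd, ret;

	io_uring_prep_openat(sqe, AT_FDCWD, path, O_RDONLY, 0);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	fd = cqe->res;			/* new fd, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return fd;
}
#endif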
4774
067524e9
JA
4775static int io_remove_buffers_prep(struct io_kiocb *req,
4776 const struct io_uring_sqe *sqe)
4777{
4778 struct io_provide_buf *p = &req->pbuf;
4779 u64 tmp;
4780
26578cda
PB
4781 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
4782 sqe->splice_fd_in)
067524e9
JA
4783 return -EINVAL;
4784
4785 tmp = READ_ONCE(sqe->fd);
4786 if (!tmp || tmp > USHRT_MAX)
4787 return -EINVAL;
4788
4789 memset(p, 0, sizeof(*p));
4790 p->nbufs = tmp;
4791 p->bgid = READ_ONCE(sqe->buf_group);
4792 return 0;
4793}
4794
dbc7d452
JA
4795static int __io_remove_buffers(struct io_ring_ctx *ctx,
4796 struct io_buffer_list *bl, unsigned nbufs)
067524e9
JA
4797{
4798 unsigned i = 0;
4799
4800 /* shouldn't happen */
4801 if (!nbufs)
4802 return 0;
4803
4804 /* the head kbuf is the list itself */
dbc7d452 4805 while (!list_empty(&bl->buf_list)) {
067524e9
JA
4806 struct io_buffer *nxt;
4807
dbc7d452 4808 nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
067524e9 4809 list_del(&nxt->list);
067524e9
JA
4810 if (++i == nbufs)
4811 return i;
1d0254e6 4812 cond_resched();
067524e9
JA
4813 }
4814 i++;
067524e9
JA
4815
4816 return i;
4817}
4818
889fca73 4819static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
067524e9
JA
4820{
4821 struct io_provide_buf *p = &req->pbuf;
4822 struct io_ring_ctx *ctx = req->ctx;
dbc7d452 4823 struct io_buffer_list *bl;
067524e9
JA
4824 int ret = 0;
4825
f8929630 4826 io_ring_submit_lock(ctx, issue_flags);
067524e9
JA
4827
4828 ret = -ENOENT;
dbc7d452
JA
4829 bl = io_buffer_get_list(ctx, p->bgid);
4830 if (bl)
4831 ret = __io_remove_buffers(ctx, bl, p->nbufs);
067524e9 4832 if (ret < 0)
93d2bcd2 4833 req_set_fail(req);
067524e9 4834
9fb8cb49
PB
4835 /* complete before unlock, IOPOLL may need the lock */
4836 __io_req_complete(req, issue_flags, ret, 0);
f8929630 4837 io_ring_submit_unlock(ctx, issue_flags);
067524e9
JA
4838 return 0;
4839}
4840
ddf0322d
JA
4841static int io_provide_buffers_prep(struct io_kiocb *req,
4842 const struct io_uring_sqe *sqe)
4843{
38134ada 4844 unsigned long size, tmp_check;
ddf0322d
JA
4845 struct io_provide_buf *p = &req->pbuf;
4846 u64 tmp;
4847
26578cda 4848 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
ddf0322d
JA
4849 return -EINVAL;
4850
4851 tmp = READ_ONCE(sqe->fd);
4852 if (!tmp || tmp > USHRT_MAX)
4853 return -E2BIG;
4854 p->nbufs = tmp;
4855 p->addr = READ_ONCE(sqe->addr);
4856 p->len = READ_ONCE(sqe->len);
4857
38134ada
PB
4858 if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
4859 &size))
4860 return -EOVERFLOW;
4861 if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
4862 return -EOVERFLOW;
4863
d81269fe
PB
4864 size = (unsigned long)p->len * p->nbufs;
4865 if (!access_ok(u64_to_user_ptr(p->addr), size))
ddf0322d
JA
4866 return -EFAULT;
4867
4868 p->bgid = READ_ONCE(sqe->buf_group);
4869 tmp = READ_ONCE(sqe->off);
4870 if (tmp > USHRT_MAX)
4871 return -E2BIG;
4872 p->bid = tmp;
4873 return 0;
4874}
4875
cc3cec83
JA
4876static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
4877{
4878 struct io_buffer *buf;
4879 struct page *page;
4880 int bufs_in_page;
4881
4882 /*
4883 * Completions that don't happen inline (eg not under uring_lock) will
4884 * add to ->io_buffers_comp. If we don't have any free buffers, check
4885 * the completion list and splice those entries first.
4886 */
4887 if (!list_empty_careful(&ctx->io_buffers_comp)) {
4888 spin_lock(&ctx->completion_lock);
4889 if (!list_empty(&ctx->io_buffers_comp)) {
4890 list_splice_init(&ctx->io_buffers_comp,
4891 &ctx->io_buffers_cache);
4892 spin_unlock(&ctx->completion_lock);
4893 return 0;
4894 }
4895 spin_unlock(&ctx->completion_lock);
4896 }
4897
4898 /*
4899 * No free buffers and no completion entries either. Allocate a new
4900 * page worth of buffer entries and add those to our freelist.
4901 */
4902 page = alloc_page(GFP_KERNEL_ACCOUNT);
4903 if (!page)
4904 return -ENOMEM;
4905
4906 list_add(&page->lru, &ctx->io_buffers_pages);
4907
4908 buf = page_address(page);
4909 bufs_in_page = PAGE_SIZE / sizeof(*buf);
4910 while (bufs_in_page) {
4911 list_add_tail(&buf->list, &ctx->io_buffers_cache);
4912 buf++;
4913 bufs_in_page--;
4914 }
4915
4916 return 0;
4917}
4918
4919static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
dbc7d452 4920 struct io_buffer_list *bl)
ddf0322d
JA
4921{
4922 struct io_buffer *buf;
4923 u64 addr = pbuf->addr;
4924 int i, bid = pbuf->bid;
4925
4926 for (i = 0; i < pbuf->nbufs; i++) {
cc3cec83
JA
4927 if (list_empty(&ctx->io_buffers_cache) &&
4928 io_refill_buffer_cache(ctx))
ddf0322d 4929 break;
cc3cec83
JA
4930 buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
4931 list);
dbc7d452 4932 list_move_tail(&buf->list, &bl->buf_list);
ddf0322d 4933 buf->addr = addr;
d1f82808 4934 buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
ddf0322d 4935 buf->bid = bid;
b1c62645 4936 buf->bgid = pbuf->bgid;
ddf0322d
JA
4937 addr += pbuf->len;
4938 bid++;
f240762f 4939 cond_resched();
ddf0322d
JA
4940 }
4941
dbc7d452 4942 return i ? 0 : -ENOMEM;
ddf0322d
JA
4943}
4944
889fca73 4945static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
ddf0322d
JA
4946{
4947 struct io_provide_buf *p = &req->pbuf;
4948 struct io_ring_ctx *ctx = req->ctx;
dbc7d452 4949 struct io_buffer_list *bl;
ddf0322d 4950 int ret = 0;
ddf0322d 4951
f8929630 4952 io_ring_submit_lock(ctx, issue_flags);
ddf0322d 4953
dbc7d452
JA
4954 bl = io_buffer_get_list(ctx, p->bgid);
4955 if (unlikely(!bl)) {
4956 bl = kmalloc(sizeof(*bl), GFP_KERNEL);
4957 if (!bl) {
4958 ret = -ENOMEM;
4959 goto err;
4960 }
4961 io_buffer_add_list(ctx, bl, p->bgid);
ddf0322d 4962 }
dbc7d452
JA
4963
4964 ret = io_add_buffers(ctx, p, bl);
4965err:
ddf0322d 4966 if (ret < 0)
93d2bcd2 4967 req_set_fail(req);
9fb8cb49
PB
4968 /* complete before unlock, IOPOLL may need the lock */
4969 __io_req_complete(req, issue_flags, ret, 0);
f8929630 4970 io_ring_submit_unlock(ctx, issue_flags);
ddf0322d 4971 return 0;
cebdb986
JA
4972}
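/*
 * Editor's illustration, not part of io_uring.c (#if 0): both halves of
 * provided buffers. IORING_OP_PROVIDE_BUFFERS hands a group of buffers to
 * io_provide_buffers() above; a later read flagged IOSQE_BUFFER_SELECT lets
 * io_buffer_select() pick one, and the chosen buffer id is reported in
 * cqe->flags. Assumes liburing; constants and provide_and_read() are made up.
 */
#if 0
#include <stdlib.h>
#include <liburing.h>

#define NR_BUFS		16
#define BUF_LEN		4096
#define BGID		7		/* arbitrary buffer group id */

static int provide_and_read(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *base = malloc(NR_BUFS * BUF_LEN);
	int bid;

	/* register NR_BUFS buffers (ids 0..NR_BUFS-1) with group BGID */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, base, BUF_LEN, NR_BUFS, BGID, 0);
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	io_uring_cqe_seen(ring, cqe);

	/* read without a buffer; the kernel selects one from group BGID */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, NULL, BUF_LEN, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);

	/* IORING_CQE_F_BUFFER set in cqe->flags; upper bits carry the id */
	bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
	io_uring_cqe_seen(ring, cqe);
	return bid;
}
#endif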
4973
3e4827b0
JA
4974static int io_epoll_ctl_prep(struct io_kiocb *req,
4975 const struct io_uring_sqe *sqe)
4976{
4977#if defined(CONFIG_EPOLL)
26578cda 4978 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
3e4827b0 4979 return -EINVAL;
2d74d042 4980 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3232dd02 4981 return -EINVAL;
3e4827b0
JA
4982
4983 req->epoll.epfd = READ_ONCE(sqe->fd);
4984 req->epoll.op = READ_ONCE(sqe->len);
4985 req->epoll.fd = READ_ONCE(sqe->off);
4986
4987 if (ep_op_has_event(req->epoll.op)) {
4988 struct epoll_event __user *ev;
4989
4990 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4991 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4992 return -EFAULT;
4993 }
4994
4995 return 0;
4996#else
4997 return -EOPNOTSUPP;
4998#endif
4999}
5000
889fca73 5001static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
3e4827b0
JA
5002{
5003#if defined(CONFIG_EPOLL)
5004 struct io_epoll *ie = &req->epoll;
5005 int ret;
45d189c6 5006 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3e4827b0
JA
5007
5008 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
5009 if (force_nonblock && ret == -EAGAIN)
5010 return -EAGAIN;
5011
5012 if (ret < 0)
93d2bcd2 5013 req_set_fail(req);
889fca73 5014 __io_req_complete(req, issue_flags, ret, 0);
3e4827b0
JA
5015 return 0;
5016#else
5017 return -EOPNOTSUPP;
5018#endif
5019}
5020
c1ca757b
JA
5021static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5022{
5023#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
26578cda 5024 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
c1ca757b 5025 return -EINVAL;
3232dd02
PB
5026 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5027 return -EINVAL;
c1ca757b
JA
5028
5029 req->madvise.addr = READ_ONCE(sqe->addr);
5030 req->madvise.len = READ_ONCE(sqe->len);
5031 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
5032 return 0;
5033#else
5034 return -EOPNOTSUPP;
5035#endif
5036}
5037
45d189c6 5038static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
c1ca757b
JA
5039{
5040#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
5041 struct io_madvise *ma = &req->madvise;
5042 int ret;
5043
45d189c6 5044 if (issue_flags & IO_URING_F_NONBLOCK)
c1ca757b
JA
5045 return -EAGAIN;
5046
0726b01e 5047 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
c1ca757b 5048 if (ret < 0)
93d2bcd2 5049 req_set_fail(req);
e1e16097 5050 io_req_complete(req, ret);
c1ca757b
JA
5051 return 0;
5052#else
5053 return -EOPNOTSUPP;
5054#endif
5055}
5056
4840e418
JA
5057static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5058{
26578cda 5059 if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
4840e418 5060 return -EINVAL;
3232dd02
PB
5061 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5062 return -EINVAL;
4840e418
JA
5063
5064 req->fadvise.offset = READ_ONCE(sqe->off);
5065 req->fadvise.len = READ_ONCE(sqe->len);
5066 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
5067 return 0;
5068}
5069
45d189c6 5070static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
4840e418
JA
5071{
5072 struct io_fadvise *fa = &req->fadvise;
5073 int ret;
5074
45d189c6 5075 if (issue_flags & IO_URING_F_NONBLOCK) {
3e69426d
JA
5076 switch (fa->advice) {
5077 case POSIX_FADV_NORMAL:
5078 case POSIX_FADV_RANDOM:
5079 case POSIX_FADV_SEQUENTIAL:
5080 break;
5081 default:
5082 return -EAGAIN;
5083 }
5084 }
4840e418
JA
5085
5086 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
5087 if (ret < 0)
93d2bcd2 5088 req_set_fail(req);
0bdf3398 5089 __io_req_complete(req, issue_flags, ret, 0);
4840e418
JA
5090 return 0;
5091}
5092
eddc7ef5
JA
5093static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5094{
1b6fe6e0
SR
5095 const char __user *path;
5096
2d74d042 5097 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3232dd02 5098 return -EINVAL;
26578cda 5099 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
eddc7ef5 5100 return -EINVAL;
9c280f90 5101 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 5102 return -EBADF;
eddc7ef5 5103
1d9e1288
BM
5104 req->statx.dfd = READ_ONCE(sqe->fd);
5105 req->statx.mask = READ_ONCE(sqe->len);
1b6fe6e0 5106 path = u64_to_user_ptr(READ_ONCE(sqe->addr));
1d9e1288
BM
5107 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
5108 req->statx.flags = READ_ONCE(sqe->statx_flags);
eddc7ef5 5109
1b6fe6e0
SR
5110 req->statx.filename = getname_flags(path,
5111 getname_statx_lookup_flags(req->statx.flags),
5112 NULL);
5113
5114 if (IS_ERR(req->statx.filename)) {
5115 int ret = PTR_ERR(req->statx.filename);
5116
5117 req->statx.filename = NULL;
5118 return ret;
5119 }
5120
5121 req->flags |= REQ_F_NEED_CLEANUP;
eddc7ef5
JA
5122 return 0;
5123}
5124
45d189c6 5125static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
eddc7ef5 5126{
1d9e1288 5127 struct io_statx *ctx = &req->statx;
eddc7ef5
JA
5128 int ret;
5129
59d70013 5130 if (issue_flags & IO_URING_F_NONBLOCK)
eddc7ef5
JA
5131 return -EAGAIN;
5132
e62753e4
BM
5133 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
5134 ctx->buffer);
eddc7ef5 5135
eddc7ef5 5136 if (ret < 0)
93d2bcd2 5137 req_set_fail(req);
e1e16097 5138 io_req_complete(req, ret);
eddc7ef5
JA
5139 return 0;
5140}
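/*
 * Illustrative userspace sketch (not part of io_uring.c): an async statx
 * through the opcode above.  The kernel copies the filename at prep time
 * (getname_flags() above) rather than at execution time.  Assumes liburing's
 * io_uring_prep_statx(); path and mask are placeholders.
 */
#include <liburing.h>
#include <fcntl.h>
#include <sys/stat.h>

static int statx_size_via_ring(struct io_uring *ring, const char *path,
			       struct statx *stx)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -1;
	io_uring_prep_statx(sqe, AT_FDCWD, path, 0, STATX_SIZE, stx);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;			/* 0 on success, -errno on failure */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}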
5141
b5dba59e
JA
5142static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5143{
14587a46 5144 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3232dd02 5145 return -EINVAL;
b5dba59e 5146 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
7df778be 5147 sqe->rw_flags || sqe->buf_index)
b5dba59e 5148 return -EINVAL;
9c280f90 5149 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 5150 return -EBADF;
b5dba59e
JA
5151
5152 req->close.fd = READ_ONCE(sqe->fd);
7df778be
PB
5153 req->close.file_slot = READ_ONCE(sqe->file_index);
5154 if (req->close.file_slot && req->close.fd)
5155 return -EINVAL;
5156
b5dba59e 5157 return 0;
b5dba59e
JA
5158}
5159
889fca73 5160static int io_close(struct io_kiocb *req, unsigned int issue_flags)
b5dba59e 5161{
9eac1904 5162 struct files_struct *files = current->files;
3af73b28 5163 struct io_close *close = &req->close;
9eac1904 5164 struct fdtable *fdt;
a1fde923
PB
5165 struct file *file = NULL;
5166 int ret = -EBADF;
b5dba59e 5167
7df778be
PB
5168 if (req->close.file_slot) {
5169 ret = io_close_fixed(req, issue_flags);
5170 goto err;
5171 }
5172
9eac1904
JA
5173 spin_lock(&files->file_lock);
5174 fdt = files_fdtable(files);
5175 if (close->fd >= fdt->max_fds) {
5176 spin_unlock(&files->file_lock);
5177 goto err;
5178 }
5179 file = fdt->fd[close->fd];
a1fde923 5180 if (!file || file->f_op == &io_uring_fops) {
9eac1904
JA
5181 spin_unlock(&files->file_lock);
5182 file = NULL;
5183 goto err;
3af73b28 5184 }
b5dba59e
JA
5185
5186 /* if the file has a flush method, be safe and punt to async */
45d189c6 5187 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
9eac1904 5188 spin_unlock(&files->file_lock);
0bf0eefd 5189 return -EAGAIN;
a2100672 5190 }
b5dba59e 5191
9eac1904
JA
5192 ret = __close_fd_get_file(close->fd, &file);
5193 spin_unlock(&files->file_lock);
5194 if (ret < 0) {
5195 if (ret == -ENOENT)
5196 ret = -EBADF;
5197 goto err;
5198 }
5199
3af73b28 5200 /* No ->flush() or already async, safely close from here */
9eac1904
JA
5201 ret = filp_close(file, current->files);
5202err:
3af73b28 5203 if (ret < 0)
93d2bcd2 5204 req_set_fail(req);
9eac1904
JA
5205 if (file)
5206 fput(file);
889fca73 5207 __io_req_complete(req, issue_flags, ret, 0);
1a417f4e 5208 return 0;
b5dba59e
JA
5209}
5210
1155c76a 5211static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5d17b4a4
JA
5212{
5213 struct io_ring_ctx *ctx = req->ctx;
5d17b4a4 5214
5d17b4a4
JA
5215 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
5216 return -EINVAL;
26578cda
PB
5217 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
5218 sqe->splice_fd_in))
5d17b4a4
JA
5219 return -EINVAL;
5220
8ed8d3c3
JA
5221 req->sync.off = READ_ONCE(sqe->off);
5222 req->sync.len = READ_ONCE(sqe->len);
5223 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
8ed8d3c3
JA
5224 return 0;
5225}
5226
45d189c6 5227static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 5228{
8ed8d3c3
JA
5229 int ret;
5230
ac45abc0 5231 /* sync_file_range always requires a blocking context */
45d189c6 5232 if (issue_flags & IO_URING_F_NONBLOCK)
ac45abc0
PB
5233 return -EAGAIN;
5234
9adbd45d 5235 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
8ed8d3c3
JA
5236 req->sync.flags);
5237 if (ret < 0)
93d2bcd2 5238 req_set_fail(req);
e1e16097 5239 io_req_complete(req, ret);
5d17b4a4
JA
5240 return 0;
5241}
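/*
 * Illustrative userspace sketch (not part of io_uring.c): queueing the
 * sync_file_range opcode above.  Since the handler always bounces to a
 * blocking context, the completion simply arrives later via the CQ ring.
 * Assumes liburing's io_uring_prep_sync_file_range(); offset, length and
 * flags are placeholders.
 */
#include <liburing.h>
#include <fcntl.h>

static void queue_sync_range(struct io_uring *ring, int fd,
			     unsigned int len, __u64 offset)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return;
	io_uring_prep_sync_file_range(sqe, fd, len, offset,
				      SYNC_FILE_RANGE_WRITE);
	io_uring_submit(ring);	/* cqe->res = sync_file_range() return value */
}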
5242
469956e8 5243#if defined(CONFIG_NET)
4c3c0943
JA
5244static bool io_net_retry(struct socket *sock, int flags)
5245{
5246 if (!(flags & MSG_WAITALL))
5247 return false;
5248 return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
5249}
5250
02d27d89
PB
5251static int io_setup_async_msg(struct io_kiocb *req,
5252 struct io_async_msghdr *kmsg)
5253{
e8c2bc1f
JA
5254 struct io_async_msghdr *async_msg = req->async_data;
5255
5256 if (async_msg)
02d27d89 5257 return -EAGAIN;
e8c2bc1f 5258 if (io_alloc_async_data(req)) {
257e84a5 5259 kfree(kmsg->free_iov);
02d27d89
PB
5260 return -ENOMEM;
5261 }
e8c2bc1f 5262 async_msg = req->async_data;
02d27d89 5263 req->flags |= REQ_F_NEED_CLEANUP;
e8c2bc1f 5264 memcpy(async_msg, kmsg, sizeof(*kmsg));
2a780802 5265 async_msg->msg.msg_name = &async_msg->addr;
257e84a5
PB
5266 /* if we're using fast_iov, set it to the new one */
5267 if (!async_msg->free_iov)
5268 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
5269
02d27d89
PB
5270 return -EAGAIN;
5271}
5272
2ae523ed
PB
5273static int io_sendmsg_copy_hdr(struct io_kiocb *req,
5274 struct io_async_msghdr *iomsg)
5275{
2ae523ed 5276 iomsg->msg.msg_name = &iomsg->addr;
257e84a5 5277 iomsg->free_iov = iomsg->fast_iov;
2ae523ed 5278 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
257e84a5 5279 req->sr_msg.msg_flags, &iomsg->free_iov);
2ae523ed
PB
5280}
5281
93642ef8
PB
5282static int io_sendmsg_prep_async(struct io_kiocb *req)
5283{
5284 int ret;
5285
93642ef8
PB
5286 ret = io_sendmsg_copy_hdr(req, req->async_data);
5287 if (!ret)
5288 req->flags |= REQ_F_NEED_CLEANUP;
5289 return ret;
5290}
5291
3529d8c2 5292static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
03b1230c 5293{
e47293fd 5294 struct io_sr_msg *sr = &req->sr_msg;
03b1230c 5295
d2b6f48b
PB
5296 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5297 return -EINVAL;
5298
270a5940 5299 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
fddaface 5300 sr->len = READ_ONCE(sqe->len);
04411806
PB
5301 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
5302 if (sr->msg_flags & MSG_DONTWAIT)
5303 req->flags |= REQ_F_NOWAIT;
3529d8c2 5304
d8768362
JA
5305#ifdef CONFIG_COMPAT
5306 if (req->ctx->compat)
5307 sr->msg_flags |= MSG_CMSG_COMPAT;
5308#endif
4c3c0943 5309 sr->done_io = 0;
93642ef8 5310 return 0;
03b1230c
JA
5311}
5312
889fca73 5313static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
aa1fa28f 5314{
6b754c8b 5315 struct io_async_msghdr iomsg, *kmsg;
4c3c0943 5316 struct io_sr_msg *sr = &req->sr_msg;
0fa03c62 5317 struct socket *sock;
7a7cacba 5318 unsigned flags;
0031275d 5319 int min_ret = 0;
0fa03c62
JA
5320 int ret;
5321
dba4a925 5322 sock = sock_from_file(req->file);
7a7cacba 5323 if (unlikely(!sock))
dba4a925 5324 return -ENOTSOCK;
3529d8c2 5325
d886e185
PB
5326 if (req_has_async_data(req)) {
5327 kmsg = req->async_data;
5328 } else {
7a7cacba
PB
5329 ret = io_sendmsg_copy_hdr(req, &iomsg);
5330 if (ret)
5331 return ret;
5332 kmsg = &iomsg;
0fa03c62 5333 }
0fa03c62 5334
04411806
PB
5335 flags = req->sr_msg.msg_flags;
5336 if (issue_flags & IO_URING_F_NONBLOCK)
7a7cacba 5337 flags |= MSG_DONTWAIT;
0031275d
SM
5338 if (flags & MSG_WAITALL)
5339 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
5340
7a7cacba 5341 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
0fa03c62 5342
7297ce3d
PB
5343 if (ret < min_ret) {
5344 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
5345 return io_setup_async_msg(req, kmsg);
5346 if (ret == -ERESTARTSYS)
5347 ret = -EINTR;
4c3c0943
JA
5348 if (ret > 0 && io_net_retry(sock, flags)) {
5349 sr->done_io += ret;
5350 req->flags |= REQ_F_PARTIAL_IO;
5351 return io_setup_async_msg(req, kmsg);
5352 }
7297ce3d
PB
5353 req_set_fail(req);
5354 }
257e84a5
PB
5355 /* fast path, check for non-NULL to avoid function call */
5356 if (kmsg->free_iov)
5357 kfree(kmsg->free_iov);
99bc4c38 5358 req->flags &= ~REQ_F_NEED_CLEANUP;
4c3c0943
JA
5359 if (ret >= 0)
5360 ret += sr->done_io;
5361 else if (sr->done_io)
5362 ret = sr->done_io;
889fca73 5363 __io_req_complete(req, issue_flags, ret, 0);
5d17b4a4 5364 return 0;
03b1230c 5365}
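/*
 * Illustrative userspace sketch (not part of io_uring.c): a sendmsg request
 * against the handler above.  Partial progress is accumulated by the kernel
 * in sr->done_io and retried internally where io_net_retry() allows it, so
 * userspace normally just reads cqe->res as "bytes sent or -errno".
 * Assumes liburing's io_uring_prep_sendmsg(); the buffer must stay valid
 * until the completion is reaped.
 */
#include <liburing.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_buf_via_ring(struct io_uring *ring, int sockfd,
			     const void *buf, size_t len)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -1;
	io_uring_prep_sendmsg(sqe, sockfd, &msg, 0);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;			/* bytes sent, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}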
aa1fa28f 5366
889fca73 5367static int io_send(struct io_kiocb *req, unsigned int issue_flags)
fddaface 5368{
7a7cacba
PB
5369 struct io_sr_msg *sr = &req->sr_msg;
5370 struct msghdr msg;
5371 struct iovec iov;
fddaface 5372 struct socket *sock;
7a7cacba 5373 unsigned flags;
0031275d 5374 int min_ret = 0;
fddaface
JA
5375 int ret;
5376
dba4a925 5377 sock = sock_from_file(req->file);
7a7cacba 5378 if (unlikely(!sock))
dba4a925 5379 return -ENOTSOCK;
fddaface 5380
7a7cacba
PB
5381 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
5382 if (unlikely(ret))
14db8411 5383 return ret;
fddaface 5384
7a7cacba
PB
5385 msg.msg_name = NULL;
5386 msg.msg_control = NULL;
5387 msg.msg_controllen = 0;
5388 msg.msg_namelen = 0;
fddaface 5389
04411806
PB
5390 flags = req->sr_msg.msg_flags;
5391 if (issue_flags & IO_URING_F_NONBLOCK)
7a7cacba 5392 flags |= MSG_DONTWAIT;
0031275d
SM
5393 if (flags & MSG_WAITALL)
5394 min_ret = iov_iter_count(&msg.msg_iter);
5395
7a7cacba
PB
5396 msg.msg_flags = flags;
5397 ret = sock_sendmsg(sock, &msg);
7297ce3d
PB
5398 if (ret < min_ret) {
5399 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
5400 return -EAGAIN;
5401 if (ret == -ERESTARTSYS)
5402 ret = -EINTR;
4c3c0943
JA
5403 if (ret > 0 && io_net_retry(sock, flags)) {
5404 sr->len -= ret;
5405 sr->buf += ret;
5406 sr->done_io += ret;
5407 req->flags |= REQ_F_PARTIAL_IO;
5408 return -EAGAIN;
5409 }
93d2bcd2 5410 req_set_fail(req);
7297ce3d 5411 }
4c3c0943
JA
5412 if (ret >= 0)
5413 ret += sr->done_io;
5414 else if (sr->done_io)
5415 ret = sr->done_io;
889fca73 5416 __io_req_complete(req, issue_flags, ret, 0);
fddaface 5417 return 0;
fddaface
JA
5418}
5419
1400e697
PB
5420static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
5421 struct io_async_msghdr *iomsg)
52de1fe1
JA
5422{
5423 struct io_sr_msg *sr = &req->sr_msg;
5424 struct iovec __user *uiov;
5425 size_t iov_len;
5426 int ret;
5427
1400e697
PB
5428 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
5429 &iomsg->uaddr, &uiov, &iov_len);
52de1fe1
JA
5430 if (ret)
5431 return ret;
5432
5433 if (req->flags & REQ_F_BUFFER_SELECT) {
5434 if (iov_len > 1)
5435 return -EINVAL;
5476dfed 5436 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
52de1fe1 5437 return -EFAULT;
5476dfed 5438 sr->len = iomsg->fast_iov[0].iov_len;
257e84a5 5439 iomsg->free_iov = NULL;
52de1fe1 5440 } else {
257e84a5 5441 iomsg->free_iov = iomsg->fast_iov;
89cd35c5 5442 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
257e84a5 5443 &iomsg->free_iov, &iomsg->msg.msg_iter,
89cd35c5 5444 false);
52de1fe1
JA
5445 if (ret > 0)
5446 ret = 0;
5447 }
5448
5449 return ret;
5450}
5451
5452#ifdef CONFIG_COMPAT
5453static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
1400e697 5454 struct io_async_msghdr *iomsg)
52de1fe1 5455{
52de1fe1
JA
5456 struct io_sr_msg *sr = &req->sr_msg;
5457 struct compat_iovec __user *uiov;
5458 compat_uptr_t ptr;
5459 compat_size_t len;
5460 int ret;
5461
4af3417a
PB
5462 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
5463 &ptr, &len);
52de1fe1
JA
5464 if (ret)
5465 return ret;
5466
5467 uiov = compat_ptr(ptr);
5468 if (req->flags & REQ_F_BUFFER_SELECT) {
5469 compat_ssize_t clen;
5470
5471 if (len > 1)
5472 return -EINVAL;
5473 if (!access_ok(uiov, sizeof(*uiov)))
5474 return -EFAULT;
5475 if (__get_user(clen, &uiov->iov_len))
5476 return -EFAULT;
5477 if (clen < 0)
5478 return -EINVAL;
2d280bc8 5479 sr->len = clen;
257e84a5 5480 iomsg->free_iov = NULL;
52de1fe1 5481 } else {
257e84a5 5482 iomsg->free_iov = iomsg->fast_iov;
89cd35c5 5483 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
257e84a5 5484 UIO_FASTIOV, &iomsg->free_iov,
89cd35c5 5485 &iomsg->msg.msg_iter, true);
52de1fe1
JA
5486 if (ret < 0)
5487 return ret;
5488 }
5489
5490 return 0;
5491}
5492#endif
5493
1400e697
PB
5494static int io_recvmsg_copy_hdr(struct io_kiocb *req,
5495 struct io_async_msghdr *iomsg)
52de1fe1 5496{
1400e697 5497 iomsg->msg.msg_name = &iomsg->addr;
52de1fe1
JA
5498
5499#ifdef CONFIG_COMPAT
5500 if (req->ctx->compat)
1400e697 5501 return __io_compat_recvmsg_copy_hdr(req, iomsg);
fddaface 5502#endif
52de1fe1 5503
1400e697 5504 return __io_recvmsg_copy_hdr(req, iomsg);
52de1fe1
JA
5505}
5506
bcda7baa 5507static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
51aac424 5508 unsigned int issue_flags)
bcda7baa
JA
5509{
5510 struct io_sr_msg *sr = &req->sr_msg;
bcda7baa 5511
51aac424 5512 return io_buffer_select(req, &sr->len, sr->bgid, issue_flags);
fddaface
JA
5513}
5514
93642ef8 5515static int io_recvmsg_prep_async(struct io_kiocb *req)
aa1fa28f 5516{
99bc4c38 5517 int ret;
3529d8c2 5518
93642ef8
PB
5519 ret = io_recvmsg_copy_hdr(req, req->async_data);
5520 if (!ret)
5521 req->flags |= REQ_F_NEED_CLEANUP;
5522 return ret;
5523}
5524
5525static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5526{
5527 struct io_sr_msg *sr = &req->sr_msg;
5528
d2b6f48b
PB
5529 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5530 return -EINVAL;
5531
270a5940 5532 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
0b7b21e4 5533 sr->len = READ_ONCE(sqe->len);
bcda7baa 5534 sr->bgid = READ_ONCE(sqe->buf_group);
04411806
PB
5535 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
5536 if (sr->msg_flags & MSG_DONTWAIT)
5537 req->flags |= REQ_F_NOWAIT;
06b76d44 5538
d8768362
JA
5539#ifdef CONFIG_COMPAT
5540 if (req->ctx->compat)
5541 sr->msg_flags |= MSG_CMSG_COMPAT;
5542#endif
7ba89d2a 5543 sr->done_io = 0;
93642ef8 5544 return 0;
aa1fa28f
JA
5545}
5546
889fca73 5547static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
aa1fa28f 5548{
6b754c8b 5549 struct io_async_msghdr iomsg, *kmsg;
7ba89d2a 5550 struct io_sr_msg *sr = &req->sr_msg;
03b1230c 5551 struct socket *sock;
7fbb1b54 5552 struct io_buffer *kbuf;
7a7cacba 5553 unsigned flags;
d1fd1c20 5554 int ret, min_ret = 0;
45d189c6 5555 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
03b1230c 5556
dba4a925 5557 sock = sock_from_file(req->file);
7a7cacba 5558 if (unlikely(!sock))
dba4a925 5559 return -ENOTSOCK;
3529d8c2 5560
d886e185
PB
5561 if (req_has_async_data(req)) {
5562 kmsg = req->async_data;
5563 } else {
7a7cacba
PB
5564 ret = io_recvmsg_copy_hdr(req, &iomsg);
5565 if (ret)
681fda8d 5566 return ret;
7a7cacba
PB
5567 kmsg = &iomsg;
5568 }
03b1230c 5569
bc02ef33 5570 if (req->flags & REQ_F_BUFFER_SELECT) {
51aac424 5571 kbuf = io_recv_buffer_select(req, issue_flags);
bc02ef33 5572 if (IS_ERR(kbuf))
52de1fe1 5573 return PTR_ERR(kbuf);
7a7cacba 5574 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
5476dfed
PB
5575 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
5576 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
7a7cacba
PB
5577 1, req->sr_msg.len);
5578 }
52de1fe1 5579
04411806
PB
5580 flags = req->sr_msg.msg_flags;
5581 if (force_nonblock)
7a7cacba 5582 flags |= MSG_DONTWAIT;
0031275d
SM
5583 if (flags & MSG_WAITALL)
5584 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
5585
7a7cacba
PB
5586 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
5587 kmsg->uaddr, flags);
7297ce3d
PB
5588 if (ret < min_ret) {
5589 if (ret == -EAGAIN && force_nonblock)
5590 return io_setup_async_msg(req, kmsg);
5591 if (ret == -ERESTARTSYS)
5592 ret = -EINTR;
7ba89d2a
JA
5593 if (ret > 0 && io_net_retry(sock, flags)) {
5594 sr->done_io += ret;
8a3e8ee5 5595 req->flags |= REQ_F_PARTIAL_IO;
7ba89d2a
JA
5596 return io_setup_async_msg(req, kmsg);
5597 }
7297ce3d
PB
5598 req_set_fail(req);
5599 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
5600 req_set_fail(req);
5601 }
03b1230c 5602
257e84a5
PB
5603 /* fast path, check for non-NULL to avoid function call */
5604 if (kmsg->free_iov)
5605 kfree(kmsg->free_iov);
99bc4c38 5606 req->flags &= ~REQ_F_NEED_CLEANUP;
7ba89d2a
JA
5607 if (ret >= 0)
5608 ret += sr->done_io;
5609 else if (sr->done_io)
5610 ret = sr->done_io;
cc3cec83 5611 __io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags));
03b1230c 5612 return 0;
0fa03c62 5613}
5d17b4a4 5614
889fca73 5615static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
fddaface 5616{
6b754c8b 5617 struct io_buffer *kbuf;
7a7cacba
PB
5618 struct io_sr_msg *sr = &req->sr_msg;
5619 struct msghdr msg;
5620 void __user *buf = sr->buf;
fddaface 5621 struct socket *sock;
7a7cacba
PB
5622 struct iovec iov;
5623 unsigned flags;
d1fd1c20 5624 int ret, min_ret = 0;
45d189c6 5625 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
fddaface 5626
dba4a925 5627 sock = sock_from_file(req->file);
7a7cacba 5628 if (unlikely(!sock))
dba4a925 5629 return -ENOTSOCK;
fddaface 5630
bc02ef33 5631 if (req->flags & REQ_F_BUFFER_SELECT) {
51aac424 5632 kbuf = io_recv_buffer_select(req, issue_flags);
bcda7baa
JA
5633 if (IS_ERR(kbuf))
5634 return PTR_ERR(kbuf);
7a7cacba 5635 buf = u64_to_user_ptr(kbuf->addr);
bc02ef33 5636 }
bcda7baa 5637
7a7cacba 5638 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
14c32eee
PB
5639 if (unlikely(ret))
5640 goto out_free;
fddaface 5641
7a7cacba
PB
5642 msg.msg_name = NULL;
5643 msg.msg_control = NULL;
5644 msg.msg_controllen = 0;
5645 msg.msg_namelen = 0;
5646 msg.msg_iocb = NULL;
5647 msg.msg_flags = 0;
fddaface 5648
04411806
PB
5649 flags = req->sr_msg.msg_flags;
5650 if (force_nonblock)
7a7cacba 5651 flags |= MSG_DONTWAIT;
0031275d
SM
5652 if (flags & MSG_WAITALL)
5653 min_ret = iov_iter_count(&msg.msg_iter);
5654
7a7cacba 5655 ret = sock_recvmsg(sock, &msg, flags);
7297ce3d
PB
5656 if (ret < min_ret) {
5657 if (ret == -EAGAIN && force_nonblock)
5658 return -EAGAIN;
5659 if (ret == -ERESTARTSYS)
5660 ret = -EINTR;
7ba89d2a
JA
5661 if (ret > 0 && io_net_retry(sock, flags)) {
5662 sr->len -= ret;
5663 sr->buf += ret;
5664 sr->done_io += ret;
8a3e8ee5 5665 req->flags |= REQ_F_PARTIAL_IO;
7ba89d2a
JA
5666 return -EAGAIN;
5667 }
7297ce3d
PB
5668 req_set_fail(req);
5669 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
0d7c1153 5670out_free:
93d2bcd2 5671 req_set_fail(req);
7297ce3d 5672 }
cc3cec83 5673
7ba89d2a
JA
5674 if (ret >= 0)
5675 ret += sr->done_io;
5676 else if (sr->done_io)
5677 ret = sr->done_io;
cc3cec83 5678 __io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags));
fddaface 5679 return 0;
fddaface
JA
5680}
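/*
 * Illustrative userspace sketch (not part of io_uring.c): a plain
 * IORING_OP_RECV against the handler above, without provided buffers.
 * With MSG_WAITALL on a stream socket, io_net_retry() above lets the kernel
 * retry short reads and accumulate progress in sr->done_io, so cqe->res
 * ends up as the total bytes received (or -errno).  Assumes liburing's
 * io_uring_prep_recv().
 */
#include <liburing.h>
#include <sys/socket.h>

static int recv_via_ring(struct io_uring *ring, int sockfd,
			 void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -1;
	io_uring_prep_recv(sqe, sockfd, buf, len, MSG_WAITALL);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;			/* bytes received, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}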
5681
3529d8c2 5682static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
17f2fe35 5683{
8ed8d3c3
JA
5684 struct io_accept *accept = &req->accept;
5685
14587a46 5686 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
17f2fe35 5687 return -EINVAL;
aaa4db12 5688 if (sqe->ioprio || sqe->len || sqe->buf_index)
17f2fe35
JA
5689 return -EINVAL;
5690
d55e5f5b
JA
5691 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5692 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
8ed8d3c3 5693 accept->flags = READ_ONCE(sqe->accept_flags);
09952e3e 5694 accept->nofile = rlimit(RLIMIT_NOFILE);
a7083ad5 5695
aaa4db12 5696 accept->file_slot = READ_ONCE(sqe->file_index);
adf3a9e9 5697 if (accept->file_slot && (accept->flags & SOCK_CLOEXEC))
aaa4db12 5698 return -EINVAL;
a7083ad5
PB
5699 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
5700 return -EINVAL;
5701 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
5702 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
8ed8d3c3 5703 return 0;
8ed8d3c3 5704}
17f2fe35 5705
889fca73 5706static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3
JA
5707{
5708 struct io_accept *accept = &req->accept;
45d189c6 5709 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ac45abc0 5710 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
aaa4db12 5711 bool fixed = !!accept->file_slot;
a7083ad5
PB
5712 struct file *file;
5713 int ret, fd;
8ed8d3c3 5714
aaa4db12
PB
5715 if (!fixed) {
5716 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
5717 if (unlikely(fd < 0))
5718 return fd;
5719 }
a7083ad5
PB
5720 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
5721 accept->flags);
5722 if (IS_ERR(file)) {
aaa4db12
PB
5723 if (!fixed)
5724 put_unused_fd(fd);
a7083ad5
PB
5725 ret = PTR_ERR(file);
5726 if (ret == -EAGAIN && force_nonblock)
5727 return -EAGAIN;
ac45abc0
PB
5728 if (ret == -ERESTARTSYS)
5729 ret = -EINTR;
93d2bcd2 5730 req_set_fail(req);
aaa4db12 5731 } else if (!fixed) {
a7083ad5
PB
5732 fd_install(fd, file);
5733 ret = fd;
aaa4db12
PB
5734 } else {
5735 ret = io_install_fixed_file(req, file, issue_flags,
5736 accept->file_slot - 1);
ac45abc0 5737 }
889fca73 5738 __io_req_complete(req, issue_flags, ret, 0);
17f2fe35 5739 return 0;
8ed8d3c3
JA
5740}
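/*
 * Illustrative userspace sketch (not part of io_uring.c): accepting a
 * connection through the handler above.  Passing NULL for the address lets
 * the kernel skip copying the peer address; cqe->res is the new descriptor,
 * or a negative errno.  Assumes liburing's io_uring_prep_accept(); the
 * fixed-file variant driven via sqe->file_index is not shown.
 */
#include <liburing.h>
#include <sys/socket.h>

static int accept_via_ring(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -1;
	io_uring_prep_accept(sqe, listen_fd, NULL, NULL, SOCK_CLOEXEC);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;			/* new fd on success, -errno on failure */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}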
5741
93642ef8
PB
5742static int io_connect_prep_async(struct io_kiocb *req)
5743{
5744 struct io_async_connect *io = req->async_data;
5745 struct io_connect *conn = &req->connect;
5746
5747 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
5748}
5749
3529d8c2 5750static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f499a021 5751{
3529d8c2 5752 struct io_connect *conn = &req->connect;
f499a021 5753
14587a46 5754 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3fbb51c1 5755 return -EINVAL;
26578cda
PB
5756 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
5757 sqe->splice_fd_in)
3fbb51c1
JA
5758 return -EINVAL;
5759
3529d8c2
JA
5760 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5761 conn->addr_len = READ_ONCE(sqe->addr2);
93642ef8 5762 return 0;
f499a021
JA
5763}
5764
889fca73 5765static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
f8e85cf2 5766{
e8c2bc1f 5767 struct io_async_connect __io, *io;
f8e85cf2 5768 unsigned file_flags;
3fbb51c1 5769 int ret;
45d189c6 5770 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
f8e85cf2 5771
d886e185 5772 if (req_has_async_data(req)) {
e8c2bc1f 5773 io = req->async_data;
f499a021 5774 } else {
3529d8c2
JA
5775 ret = move_addr_to_kernel(req->connect.addr,
5776 req->connect.addr_len,
e8c2bc1f 5777 &__io.address);
f499a021
JA
5778 if (ret)
5779 goto out;
5780 io = &__io;
5781 }
5782
3fbb51c1
JA
5783 file_flags = force_nonblock ? O_NONBLOCK : 0;
5784
e8c2bc1f 5785 ret = __sys_connect_file(req->file, &io->address,
3fbb51c1 5786 req->connect.addr_len, file_flags);
87f80d62 5787 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
d886e185 5788 if (req_has_async_data(req))
b7bb4f7d 5789 return -EAGAIN;
e8c2bc1f 5790 if (io_alloc_async_data(req)) {
f499a021
JA
5791 ret = -ENOMEM;
5792 goto out;
5793 }
e8c2bc1f 5794 memcpy(req->async_data, &__io, sizeof(__io));
f8e85cf2 5795 return -EAGAIN;
f499a021 5796 }
f8e85cf2
JA
5797 if (ret == -ERESTARTSYS)
5798 ret = -EINTR;
f499a021 5799out:
4e88d6e7 5800 if (ret < 0)
93d2bcd2 5801 req_set_fail(req);
889fca73 5802 __io_req_complete(req, issue_flags, ret, 0);
f8e85cf2 5803 return 0;
469956e8
Y
5804}
5805#else /* !CONFIG_NET */
99a10081
JA
5806#define IO_NETOP_FN(op) \
5807static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
5808{ \
5809 return -EOPNOTSUPP; \
5810}
5811
5812#define IO_NETOP_PREP(op) \
5813IO_NETOP_FN(op) \
5814static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5815{ \
5816 return -EOPNOTSUPP; \
5817} \
5818
5819#define IO_NETOP_PREP_ASYNC(op) \
5820IO_NETOP_PREP(op) \
5821static int io_##op##_prep_async(struct io_kiocb *req) \
5822{ \
5823 return -EOPNOTSUPP; \
5824}
5825
5826IO_NETOP_PREP_ASYNC(sendmsg);
5827IO_NETOP_PREP_ASYNC(recvmsg);
5828IO_NETOP_PREP_ASYNC(connect);
5829IO_NETOP_PREP(accept);
5830IO_NETOP_FN(send);
5831IO_NETOP_FN(recv);
469956e8 5832#endif /* CONFIG_NET */
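/*
 * Illustrative userspace sketch (not part of io_uring.c): an async connect
 * via the IORING_OP_CONNECT handler above.  If the socket would block, the
 * kernel copies the sockaddr into async data (-EINPROGRESS path above) and
 * finishes the request later.  Assumes liburing's io_uring_prep_connect();
 * the loopback:8080 address is a placeholder.
 */
#include <liburing.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static int connect_via_ring(struct io_uring *ring, int sockfd)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -1;
	io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&addr,
			      sizeof(addr));
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;			/* 0 on success, -errno on failure */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}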
f8e85cf2 5833
d7718a9d
JA
5834struct io_poll_table {
5835 struct poll_table_struct pt;
5836 struct io_kiocb *req;
68b11e8b 5837 int nr_entries;
d7718a9d
JA
5838 int error;
5839};
ce593a6c 5840
aa43477b 5841#define IO_POLL_CANCEL_FLAG BIT(31)
e2c0cb7c 5842#define IO_POLL_REF_MASK GENMASK(30, 0)
6d816e08 5843
aa43477b
PB
5844/*
5845 * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
5846 * bump it and acquire ownership. Modifying a request while not owning it is
5847 * disallowed; that prevents races both for enqueueing task_work and between
5848 * arming poll and wakeups.
5849 */
5850static inline bool io_poll_get_ownership(struct io_kiocb *req)
5851{
5852 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
d7718a9d
JA
5853}
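/*
 * Standalone sketch (not kernel code) of the same "first incrementer owns
 * it" idea, using C11 atomics.  REFS_MASK mirrors IO_POLL_REF_MASK
 * (bits 30..0) and CANCEL_FLAG mirrors IO_POLL_CANCEL_FLAG (bit 31); the
 * names and the program itself are ours, for illustration only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CANCEL_FLAG (1u << 31)
#define REFS_MASK   (CANCEL_FLAG - 1)	/* bits 30..0 */

static atomic_uint poll_refs;		/* starts at 0: nobody owns it */

/* Returns true only for the caller that bumped the ref count from zero. */
static bool get_ownership(void)
{
	return !(atomic_fetch_add(&poll_refs, 1) & REFS_MASK);
}

int main(void)
{
	printf("first caller owns it:  %d\n", get_ownership());	/* 1 */
	printf("second caller owns it: %d\n", get_ownership());	/* 0 */
	return 0;
}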
5854
aa43477b 5855static void io_poll_mark_cancelled(struct io_kiocb *req)
74ce6ce4 5856{
aa43477b 5857 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
74ce6ce4
JA
5858}
5859
d4e7cd36 5860static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
18bceab1 5861{
e8c2bc1f 5862 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
d4e7cd36 5863 if (req->opcode == IORING_OP_POLL_ADD)
e8c2bc1f 5864 return req->async_data;
d4e7cd36
JA
5865 return req->apoll->double_poll;
5866}
5867
5868static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
5869{
5870 if (req->opcode == IORING_OP_POLL_ADD)
5871 return &req->poll;
5872 return &req->apoll->poll;
5873}
5874
5641897a 5875static void io_poll_req_insert(struct io_kiocb *req)
d4e7cd36 5876{
5641897a
PB
5877 struct io_ring_ctx *ctx = req->ctx;
5878 struct hlist_head *list;
18bceab1 5879
cef216fc 5880 list = &ctx->cancel_hash[hash_long(req->cqe.user_data, ctx->cancel_hash_bits)];
5641897a 5881 hlist_add_head(&req->hash_node, list);
18bceab1
JA
5882}
5883
5641897a
PB
5884static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5885 wait_queue_func_t wake_func)
18bceab1 5886{
5641897a 5887 poll->head = NULL;
5641897a
PB
5888#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
5889 /* mask in events that we always want/need */
5890 poll->events = events | IO_POLL_UNMASK;
5891 INIT_LIST_HEAD(&poll->wait.entry);
5892 init_waitqueue_func_entry(&poll->wait, wake_func);
18bceab1
JA
5893}
5894
aa43477b 5895static inline void io_poll_remove_entry(struct io_poll_iocb *poll)
18bceab1 5896{
791f3465 5897 struct wait_queue_head *head = smp_load_acquire(&poll->head);
18bceab1 5898
791f3465
PB
5899 if (head) {
5900 spin_lock_irq(&head->lock);
5901 list_del_init(&poll->wait.entry);
5902 poll->head = NULL;
5903 spin_unlock_irq(&head->lock);
5904 }
aa43477b 5905}
18bceab1 5906
aa43477b
PB
5907static void io_poll_remove_entries(struct io_kiocb *req)
5908{
91eac1c6
JA
5909 /*
5910 * Nothing to do if neither of those flags is set. Avoid dipping
5911 * into the poll/apoll/double cachelines if we can.
5912 */
5913 if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
5914 return;
18bceab1 5915
791f3465
PB
5916 /*
5917 * While we hold the waitqueue lock and the waitqueue is nonempty,
5918 * wake_up_pollfree() will wait for us. However, taking the waitqueue
5919 * lock in the first place can race with the waitqueue being freed.
5920 *
5921 * We solve this as eventpoll does: by taking advantage of the fact that
5922 * all users of wake_up_pollfree() will RCU-delay the actual free. If
5923 * we enter rcu_read_lock() and see that the pointer to the queue is
5924 * non-NULL, we can then lock it without the memory being freed out from
5925 * under us.
5926 *
5927 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
5928 * case the caller deletes the entry from the queue, leaving it empty.
5929 * In that case, only RCU prevents the queue memory from being freed.
5930 */
5931 rcu_read_lock();
91eac1c6
JA
5932 if (req->flags & REQ_F_SINGLE_POLL)
5933 io_poll_remove_entry(io_poll_get_single(req));
5934 if (req->flags & REQ_F_DOUBLE_POLL)
5935 io_poll_remove_entry(io_poll_get_double(req));
791f3465 5936 rcu_read_unlock();
18bceab1
JA
5937}
5938
aa43477b
PB
5939/*
5940 * All poll tw should go through this. Checks for poll events, manages
5941 * references, does rewait, etc.
5942 *
5943 * Returns a negative error on failure. >0 when no action is required, which is
5944 * either a spurious wakeup or a multishot CQE that has been served. 0 when it's done with
cef216fc 5945 * the request, then the mask is stored in req->cqe.res.
aa43477b 5946 */
5106dd6e 5947static int io_poll_check_events(struct io_kiocb *req, bool locked)
18bceab1 5948{
74ce6ce4 5949 struct io_ring_ctx *ctx = req->ctx;
aa43477b 5950 int v;
18bceab1 5951
316319e8 5952 /* req->task == current here, checking PF_EXITING is safe */
e09ee510 5953 if (unlikely(req->task->flags & PF_EXITING))
f2219057 5954 return -ECANCELED;
18bceab1 5955
aa43477b
PB
5956 do {
5957 v = atomic_read(&req->poll_refs);
74ce6ce4 5958
aa43477b
PB
5959 /* tw handler should be the owner, and so have some references */
5960 if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
5961 return 0;
5962 if (v & IO_POLL_CANCEL_FLAG)
5963 return -ECANCELED;
8706e04e 5964
cef216fc 5965 if (!req->cqe.res) {
2804ecd8 5966 struct poll_table_struct pt = { ._key = req->apoll_events };
cce64ef0 5967 unsigned flags = locked ? 0 : IO_URING_F_UNLOCKED;
18bceab1 5968
cce64ef0 5969 if (unlikely(!io_assign_file(req, flags)))
7179c3ce 5970 return -EBADF;
cef216fc 5971 req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
c8b5e260 5972 }
74ce6ce4 5973
aa43477b 5974 /* multishot, just fill a CQE and proceed */
cef216fc
PB
5975 if (req->cqe.res && !(req->apoll_events & EPOLLONESHOT)) {
5976 __poll_t mask = mangle_poll(req->cqe.res & req->apoll_events);
aa43477b 5977 bool filled;
18bceab1 5978
aa43477b 5979 spin_lock(&ctx->completion_lock);
cef216fc 5980 filled = io_fill_cqe_aux(ctx, req->cqe.user_data, mask,
aa43477b
PB
5981 IORING_CQE_F_MORE);
5982 io_commit_cqring(ctx);
5983 spin_unlock(&ctx->completion_lock);
5984 if (unlikely(!filled))
5985 return -ECANCELED;
5986 io_cqring_ev_posted(ctx);
cef216fc 5987 } else if (req->cqe.res) {
aa43477b
PB
5988 return 0;
5989 }
18bceab1 5990
aa43477b
PB
5991 /*
5992 * Release all references, retry if someone tried to restart
5993 * task_work while we were executing it.
5994 */
5995 } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));
18bceab1 5996
18bceab1
JA
5997 return 1;
5998}
5999
aa43477b 6000static void io_poll_task_func(struct io_kiocb *req, bool *locked)
18bceab1 6001{
18bceab1 6002 struct io_ring_ctx *ctx = req->ctx;
aa43477b 6003 int ret;
18bceab1 6004
5106dd6e 6005 ret = io_poll_check_events(req, *locked);
aa43477b
PB
6006 if (ret > 0)
6007 return;
6008
6009 if (!ret) {
cef216fc 6010 req->cqe.res = mangle_poll(req->cqe.res & req->poll.events);
e27414be 6011 } else {
cef216fc 6012 req->cqe.res = ret;
aa43477b 6013 req_set_fail(req);
a62682f9 6014 }
aa43477b
PB
6015
6016 io_poll_remove_entries(req);
6017 spin_lock(&ctx->completion_lock);
6018 hash_del(&req->hash_node);
cef216fc 6019 __io_req_complete_post(req, req->cqe.res, 0);
aa43477b
PB
6020 io_commit_cqring(ctx);
6021 spin_unlock(&ctx->completion_lock);
6022 io_cqring_ev_posted(ctx);
18bceab1
JA
6023}
6024
aa43477b 6025static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
18bceab1
JA
6026{
6027 struct io_ring_ctx *ctx = req->ctx;
aa43477b 6028 int ret;
18bceab1 6029
5106dd6e 6030 ret = io_poll_check_events(req, *locked);
aa43477b
PB
6031 if (ret > 0)
6032 return;
18bceab1 6033
aa43477b
PB
6034 io_poll_remove_entries(req);
6035 spin_lock(&ctx->completion_lock);
6036 hash_del(&req->hash_node);
6037 spin_unlock(&ctx->completion_lock);
18bceab1 6038
aa43477b
PB
6039 if (!ret)
6040 io_req_task_submit(req, locked);
6041 else
6042 io_req_complete_failed(req, ret);
18bceab1
JA
6043}
6044
81459350 6045static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
aa43477b 6046{
cef216fc 6047 req->cqe.res = mask;
81459350
JA
6048 /*
6049 * This is useful for poll that is armed on behalf of another
6050 * request, and where the wakeup path could be on a different
6051 * CPU. We want to avoid pulling in req->apoll->events for that
6052 * case.
6053 */
2804ecd8 6054 req->apoll_events = events;
aa43477b
PB
6055 if (req->opcode == IORING_OP_POLL_ADD)
6056 req->io_task_work.func = io_poll_task_func;
6057 else
6058 req->io_task_work.func = io_apoll_task_func;
6059
cef216fc 6060 trace_io_uring_task_add(req->ctx, req, req->cqe.user_data, req->opcode, mask);
aa43477b
PB
6061 io_req_task_work_add(req, false);
6062}
6063
81459350 6064static inline void io_poll_execute(struct io_kiocb *req, int res, int events)
aa43477b
PB
6065{
6066 if (io_poll_get_ownership(req))
81459350 6067 __io_poll_execute(req, res, events);
aa43477b
PB
6068}
6069
6070static void io_poll_cancel_req(struct io_kiocb *req)
6071{
6072 io_poll_mark_cancelled(req);
6073 /* kick tw, which should complete the request */
81459350 6074 io_poll_execute(req, 0, 0);
aa43477b
PB
6075}
6076
d89a4fac
JA
6077#define wqe_to_req(wait) ((void *)((unsigned long) (wait)->private & ~1))
6078#define wqe_is_double(wait) ((unsigned long) (wait)->private & 1)
6079
aa43477b
PB
6080static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
6081 void *key)
18bceab1 6082{
d89a4fac 6083 struct io_kiocb *req = wqe_to_req(wait);
aa43477b
PB
6084 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
6085 wait);
18bceab1
JA
6086 __poll_t mask = key_to_poll(key);
6087
791f3465
PB
6088 if (unlikely(mask & POLLFREE)) {
6089 io_poll_mark_cancelled(req);
6090 /* we have to kick tw in case it's not already */
81459350 6091 io_poll_execute(req, 0, poll->events);
791f3465
PB
6092
6093 /*
6094 * If the waitqueue is being freed early but someone is already
6095 * holds ownership over it, we have to tear down the request as
6096 * best we can. That means immediately removing the request from
6097 * its waitqueue and preventing all further accesses to the
6098 * waitqueue via the request.
6099 */
6100 list_del_init(&poll->wait.entry);
6101
6102 /*
6103 * Careful: this *must* be the last step, since as soon
6104 * as req->head is NULL'ed out, the request can be
6105 * completed and freed, since aio_poll_complete_work()
6106 * will no longer need to take the waitqueue lock.
6107 */
6108 smp_store_release(&poll->head, NULL);
6109 return 1;
6110 }
6111
aa43477b 6112 /* for instances that support it, check for an event match first */
18bceab1
JA
6113 if (mask && !(mask & poll->events))
6114 return 0;
6115
eb0089d6
PB
6116 if (io_poll_get_ownership(req)) {
6117 /* optional, saves extra locking for removal in tw handler */
6118 if (mask && poll->events & EPOLLONESHOT) {
6119 list_del_init(&poll->wait.entry);
6120 poll->head = NULL;
d89a4fac
JA
6121 if (wqe_is_double(wait))
6122 req->flags &= ~REQ_F_DOUBLE_POLL;
6123 else
6124 req->flags &= ~REQ_F_SINGLE_POLL;
eb0089d6 6125 }
81459350 6126 __io_poll_execute(req, mask, poll->events);
eb0089d6 6127 }
18bceab1 6128 return 1;
18bceab1
JA
6129}
6130
6131static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
807abcb0
JA
6132 struct wait_queue_head *head,
6133 struct io_poll_iocb **poll_ptr)
18bceab1
JA
6134{
6135 struct io_kiocb *req = pt->req;
d89a4fac 6136 unsigned long wqe_private = (unsigned long) req;
18bceab1
JA
6137
6138 /*
68b11e8b
PB
6139 * The file being polled uses multiple waitqueues for poll handling
6140 * (e.g. one for read, one for write). Set up a separate io_poll_iocb
6141 * if this happens.
18bceab1 6142 */
68b11e8b 6143 if (unlikely(pt->nr_entries)) {
aa43477b 6144 struct io_poll_iocb *first = poll;
58852d4d 6145
23a65db8 6146 /* double add on the same waitqueue head, ignore */
aa43477b 6147 if (first->head == head)
23a65db8 6148 return;
18bceab1 6149 /* already have a 2nd entry, fail a third attempt */
807abcb0 6150 if (*poll_ptr) {
23a65db8
PB
6151 if ((*poll_ptr)->head == head)
6152 return;
18bceab1
JA
6153 pt->error = -EINVAL;
6154 return;
6155 }
aa43477b 6156
18bceab1
JA
6157 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
6158 if (!poll) {
6159 pt->error = -ENOMEM;
6160 return;
6161 }
d89a4fac
JA
6162 /* mark as double wq entry */
6163 wqe_private |= 1;
91eac1c6 6164 req->flags |= REQ_F_DOUBLE_POLL;
aa43477b 6165 io_init_poll_iocb(poll, first->events, first->wait.func);
807abcb0 6166 *poll_ptr = poll;
d886e185
PB
6167 if (req->opcode == IORING_OP_POLL_ADD)
6168 req->flags |= REQ_F_ASYNC_DATA;
18bceab1
JA
6169 }
6170
91eac1c6 6171 req->flags |= REQ_F_SINGLE_POLL;
68b11e8b 6172 pt->nr_entries++;
18bceab1 6173 poll->head = head;
d89a4fac 6174 poll->wait.private = (void *) wqe_private;
a31eb4a2
JX
6175
6176 if (poll->events & EPOLLEXCLUSIVE)
6177 add_wait_queue_exclusive(head, &poll->wait);
6178 else
6179 add_wait_queue(head, &poll->wait);
18bceab1
JA
6180}
6181
aa43477b 6182static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
18bceab1
JA
6183 struct poll_table_struct *p)
6184{
6185 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
d7718a9d 6186
aa43477b
PB
6187 __io_queue_proc(&pt->req->poll, pt, head,
6188 (struct io_poll_iocb **) &pt->req->async_data);
d7718a9d
JA
6189}
6190
aa43477b
PB
6191static int __io_arm_poll_handler(struct io_kiocb *req,
6192 struct io_poll_iocb *poll,
6193 struct io_poll_table *ipt, __poll_t mask)
d7718a9d
JA
6194{
6195 struct io_ring_ctx *ctx = req->ctx;
aa43477b 6196 int v;
d7718a9d 6197
4d52f338 6198 INIT_HLIST_NODE(&req->hash_node);
8e29da69 6199 req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
aa43477b 6200 io_init_poll_iocb(poll, mask, io_poll_wake);
b90cd197 6201 poll->file = req->file;
d7718a9d
JA
6202
6203 ipt->pt._key = mask;
6204 ipt->req = req;
68b11e8b
PB
6205 ipt->error = 0;
6206 ipt->nr_entries = 0;
d7718a9d 6207
aa43477b
PB
6208 /*
6209 * Take ownership to delay any tw execution until we're done
6210 * with poll arming; see io_poll_get_ownership().
6211 */
6212 atomic_set(&req->poll_refs, 1);
d7718a9d 6213 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
aa43477b
PB
6214
6215 if (mask && (poll->events & EPOLLONESHOT)) {
6216 io_poll_remove_entries(req);
6217 /* no one else has access to the req, forget about the ref */
6218 return mask;
6219 }
6220 if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
6221 io_poll_remove_entries(req);
6222 if (!ipt->error)
6223 ipt->error = -EINVAL;
6224 return 0;
6225 }
d7718a9d 6226
79ebeaee 6227 spin_lock(&ctx->completion_lock);
aa43477b
PB
6228 io_poll_req_insert(req);
6229 spin_unlock(&ctx->completion_lock);
6230
6231 if (mask) {
6232 /* can't multishot if failed, just queue the event we've got */
6233 if (unlikely(ipt->error || !ipt->nr_entries))
6234 poll->events |= EPOLLONESHOT;
81459350 6235 __io_poll_execute(req, mask, poll->events);
aa43477b 6236 return 0;
d7718a9d
JA
6237 }
6238
aa43477b
PB
6239 /*
6240 * Release ownership. If someone tried to queue a tw while it was
6241 * locked, kick it off for them.
6242 */
6243 v = atomic_dec_return(&req->poll_refs);
6244 if (unlikely(v & IO_POLL_REF_MASK))
81459350 6245 __io_poll_execute(req, 0, poll->events);
aa43477b
PB
6246 return 0;
6247}
6248
6249static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
6250 struct poll_table_struct *p)
6251{
6252 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
6253 struct async_poll *apoll = pt->req->apoll;
6254
6255 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
d7718a9d
JA
6256}
6257
59b735ae
OL
6258enum {
6259 IO_APOLL_OK,
6260 IO_APOLL_ABORTED,
6261 IO_APOLL_READY
6262};
6263
4d9237e3 6264static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
d7718a9d
JA
6265{
6266 const struct io_op_def *def = &io_op_defs[req->opcode];
6267 struct io_ring_ctx *ctx = req->ctx;
6268 struct async_poll *apoll;
6269 struct io_poll_table ipt;
aa43477b
PB
6270 __poll_t mask = EPOLLONESHOT | POLLERR | POLLPRI;
6271 int ret;
d7718a9d 6272
b2d9c3da
PB
6273 if (!def->pollin && !def->pollout)
6274 return IO_APOLL_ABORTED;
10c87333
JA
6275 if (!file_can_poll(req->file))
6276 return IO_APOLL_ABORTED;
6277 if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
658d0a40 6278 return IO_APOLL_ABORTED;
b2d9c3da
PB
6279
6280 if (def->pollin) {
b2d9c3da
PB
6281 mask |= POLLIN | POLLRDNORM;
6282
6283 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
6284 if ((req->opcode == IORING_OP_RECVMSG) &&
6285 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
6286 mask &= ~POLLIN;
6287 } else {
b2d9c3da
PB
6288 mask |= POLLOUT | POLLWRNORM;
6289 }
52dd8640
DY
6290 if (def->poll_exclusive)
6291 mask |= EPOLLEXCLUSIVE;
10c87333
JA
6292 if (req->flags & REQ_F_POLLED) {
6293 apoll = req->apoll;
6294 } else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
6295 !list_empty(&ctx->apoll_cache)) {
4d9237e3
JA
6296 apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
6297 poll.wait.entry);
6298 list_del_init(&apoll->poll.wait.entry);
6299 } else {
6300 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
6301 if (unlikely(!apoll))
6302 return IO_APOLL_ABORTED;
6303 }
807abcb0 6304 apoll->double_poll = NULL;
d7718a9d 6305 req->apoll = apoll;
b2d9c3da 6306 req->flags |= REQ_F_POLLED;
d7718a9d
JA
6307 ipt.pt._qproc = io_async_queue_proc;
6308
4d55f238 6309 io_kbuf_recycle(req, issue_flags);
abdad709 6310
aa43477b 6311 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
41a5169c
HX
6312 if (ret || ipt.error)
6313 return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
6314
cef216fc 6315 trace_io_uring_poll_arm(ctx, req, req->cqe.user_data, req->opcode,
236daeae 6316 mask, apoll->poll.events);
59b735ae 6317 return IO_APOLL_OK;
d7718a9d
JA
6318}
6319
76e1b642
JA
6320/*
6321 * Returns true if we found and killed one or more poll requests
6322 */
c072481d
PB
6323static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
6324 struct task_struct *tsk, bool cancel_all)
221c5eb2 6325{
78076bb6 6326 struct hlist_node *tmp;
221c5eb2 6327 struct io_kiocb *req;
aa43477b
PB
6328 bool found = false;
6329 int i;
221c5eb2 6330
79ebeaee 6331 spin_lock(&ctx->completion_lock);
78076bb6
JA
6332 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
6333 struct hlist_head *list;
6334
6335 list = &ctx->cancel_hash[i];
f3606e3a 6336 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
42a7b4ed 6337 if (io_match_task_safe(req, tsk, cancel_all)) {
61bc84c4 6338 hlist_del_init(&req->hash_node);
aa43477b
PB
6339 io_poll_cancel_req(req);
6340 found = true;
6341 }
f3606e3a 6342 }
221c5eb2 6343 }
79ebeaee 6344 spin_unlock(&ctx->completion_lock);
aa43477b 6345 return found;
221c5eb2
JA
6346}
6347
b21432b4
JA
6348static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
6349 struct io_cancel_data *cd)
e07785b0 6350 __must_hold(&ctx->completion_lock)
47f46768 6351{
78076bb6 6352 struct hlist_head *list;
47f46768
JA
6353 struct io_kiocb *req;
6354
b21432b4 6355 list = &ctx->cancel_hash[hash_long(cd->data, ctx->cancel_hash_bits)];
78076bb6 6356 hlist_for_each_entry(req, list, hash_node) {
b21432b4 6357 if (cd->data != req->cqe.user_data)
b41e9852 6358 continue;
9ba5fac8
PB
6359 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
6360 continue;
8e29da69
JA
6361 if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
6362 if (cd->seq == req->work.cancel_seq)
6363 continue;
6364 req->work.cancel_seq = cd->seq;
6365 }
b2cb805f 6366 return req;
47f46768 6367 }
b2cb805f
JA
6368 return NULL;
6369}
6370
4bf94615
JA
6371static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
6372 struct io_cancel_data *cd)
6373 __must_hold(&ctx->completion_lock)
6374{
6375 struct io_kiocb *req;
6376 int i;
6377
6378 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
6379 struct hlist_head *list;
6380
6381 list = &ctx->cancel_hash[i];
6382 hlist_for_each_entry(req, list, hash_node) {
970f256e
JA
6383 if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
6384 req->file != cd->file)
4bf94615
JA
6385 continue;
6386 if (cd->seq == req->work.cancel_seq)
6387 continue;
6388 req->work.cancel_seq = cd->seq;
6389 return req;
6390 }
6391 }
6392 return NULL;
6393}
6394
aa43477b
PB
6395static bool io_poll_disarm(struct io_kiocb *req)
6396 __must_hold(&ctx->completion_lock)
6397{
6398 if (!io_poll_get_ownership(req))
6399 return false;
6400 io_poll_remove_entries(req);
6401 hash_del(&req->hash_node);
6402 return true;
6403}
6404
b21432b4 6405static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
e07785b0 6406 __must_hold(&ctx->completion_lock)
b2cb805f 6407{
4bf94615 6408 struct io_kiocb *req;
b2cb805f 6409
970f256e 6410 if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
4bf94615
JA
6411 req = io_poll_file_find(ctx, cd);
6412 else
6413 req = io_poll_find(ctx, false, cd);
b2cb805f
JA
6414 if (!req)
6415 return -ENOENT;
aa43477b
PB
6416 io_poll_cancel_req(req);
6417 return 0;
47f46768
JA
6418}
6419
9096af3e
PB
6420static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
6421 unsigned int flags)
6422{
6423 u32 events;
47f46768 6424
9096af3e
PB
6425 events = READ_ONCE(sqe->poll32_events);
6426#ifdef __BIG_ENDIAN
6427 events = swahw32(events);
6428#endif
6429 if (!(flags & IORING_POLL_ADD_MULTI))
6430 events |= EPOLLONESHOT;
6431 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
47f46768
JA
6432}
6433
c5de0036 6434static int io_poll_update_prep(struct io_kiocb *req,
3529d8c2 6435 const struct io_uring_sqe *sqe)
0969e783 6436{
c5de0036
PB
6437 struct io_poll_update *upd = &req->poll_update;
6438 u32 flags;
6439
0969e783
JA
6440 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6441 return -EINVAL;
26578cda 6442 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
c5de0036
PB
6443 return -EINVAL;
6444 flags = READ_ONCE(sqe->len);
6445 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
6446 IORING_POLL_ADD_MULTI))
6447 return -EINVAL;
6448 /* meaningless without update */
6449 if (flags == IORING_POLL_ADD_MULTI)
0969e783
JA
6450 return -EINVAL;
6451
c5de0036
PB
6452 upd->old_user_data = READ_ONCE(sqe->addr);
6453 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
6454 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
221c5eb2 6455
c5de0036
PB
6456 upd->new_user_data = READ_ONCE(sqe->off);
6457 if (!upd->update_user_data && upd->new_user_data)
6458 return -EINVAL;
6459 if (upd->update_events)
6460 upd->events = io_poll_parse_events(sqe, flags);
6461 else if (sqe->poll32_events)
6462 return -EINVAL;
221c5eb2 6463
221c5eb2
JA
6464 return 0;
6465}
6466
3529d8c2 6467static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
6468{
6469 struct io_poll_iocb *poll = &req->poll;
c5de0036 6470 u32 flags;
221c5eb2
JA
6471
6472 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6473 return -EINVAL;
c5de0036 6474 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
88e41cf9
JA
6475 return -EINVAL;
6476 flags = READ_ONCE(sqe->len);
c5de0036 6477 if (flags & ~IORING_POLL_ADD_MULTI)
221c5eb2 6478 return -EINVAL;
04c76b41
PB
6479 if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
6480 return -EINVAL;
221c5eb2 6481
48dcd38d 6482 io_req_set_refcount(req);
2804ecd8 6483 req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
0969e783
JA
6484 return 0;
6485}
6486
61e98203 6487static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
0969e783
JA
6488{
6489 struct io_poll_iocb *poll = &req->poll;
0969e783 6490 struct io_poll_table ipt;
aa43477b 6491 int ret;
0969e783 6492
d7718a9d 6493 ipt.pt._qproc = io_poll_queue_proc;
36703247 6494
aa43477b
PB
6495 ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
6496 ret = ret ?: ipt.error;
6497 if (ret)
6498 __io_req_complete(req, issue_flags, ret, 0);
6499 return 0;
221c5eb2
JA
6500}
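/*
 * Illustrative userspace sketch (not part of io_uring.c): a single-shot
 * poll request against the handler above.  cqe->res carries the signalled
 * poll mask (after mangle_poll()), much like poll(2)'s revents.  Assumes
 * liburing's io_uring_prep_poll_add(); multishot mode would additionally
 * set IORING_POLL_ADD_MULTI, which io_poll_add_prep() parses from sqe->len.
 */
#include <liburing.h>
#include <poll.h>

static int wait_readable_via_ring(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -1;
	io_uring_prep_poll_add(sqe, fd, POLLIN);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;			/* poll mask (e.g. POLLIN), or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}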
6501
c5de0036 6502static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
b69de288 6503{
b21432b4 6504 struct io_cancel_data cd = { .data = req->poll_update.old_user_data, };
b69de288
JA
6505 struct io_ring_ctx *ctx = req->ctx;
6506 struct io_kiocb *preq;
2bbb146d 6507 int ret2, ret = 0;
cc8e9ba7 6508 bool locked;
b69de288 6509
79ebeaee 6510 spin_lock(&ctx->completion_lock);
b21432b4 6511 preq = io_poll_find(ctx, true, &cd);
aa43477b 6512 if (!preq || !io_poll_disarm(preq)) {
79ebeaee 6513 spin_unlock(&ctx->completion_lock);
aa43477b 6514 ret = preq ? -EALREADY : -ENOENT;
2bbb146d 6515 goto out;
b69de288 6516 }
79ebeaee 6517 spin_unlock(&ctx->completion_lock);
cb3b200e 6518
2bbb146d
PB
6519 if (req->poll_update.update_events || req->poll_update.update_user_data) {
6520 /* only replace the event mask bits, keep behavior flags */
6521 if (req->poll_update.update_events) {
6522 preq->poll.events &= ~0xffff;
6523 preq->poll.events |= req->poll_update.events & 0xffff;
6524 preq->poll.events |= IO_POLL_UNMASK;
cb3b200e 6525 }
2bbb146d 6526 if (req->poll_update.update_user_data)
cef216fc 6527 preq->cqe.user_data = req->poll_update.new_user_data;
b69de288 6528
2bbb146d
PB
6529 ret2 = io_poll_add(preq, issue_flags);
6530 /* successfully updated, don't complete poll request */
6531 if (!ret2)
6532 goto out;
b69de288 6533 }
6224590d 6534
2bbb146d 6535 req_set_fail(preq);
cef216fc 6536 preq->cqe.res = -ECANCELED;
cc8e9ba7
PB
6537 locked = !(issue_flags & IO_URING_F_UNLOCKED);
6538 io_req_task_complete(preq, &locked);
2bbb146d
PB
6539out:
6540 if (ret < 0)
6224590d 6541 req_set_fail(req);
2bbb146d 6542 /* complete update request, we're done with it */
cc8e9ba7 6543 __io_req_complete(req, issue_flags, ret, 0);
b69de288 6544 return 0;
89850fce
JA
6545}
6546
5262f567
JA
6547static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
6548{
ad8a48ac
JA
6549 struct io_timeout_data *data = container_of(timer,
6550 struct io_timeout_data, timer);
6551 struct io_kiocb *req = data->req;
6552 struct io_ring_ctx *ctx = req->ctx;
5262f567
JA
6553 unsigned long flags;
6554
89850fce 6555 spin_lock_irqsave(&ctx->timeout_lock, flags);
a71976f3 6556 list_del_init(&req->timeout.list);
01cec8c1
PB
6557 atomic_set(&req->ctx->cq_timeouts,
6558 atomic_read(&req->ctx->cq_timeouts) + 1);
89850fce 6559 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
01cec8c1 6560
a90c8bf6
PB
6561 if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
6562 req_set_fail(req);
6563
cef216fc 6564 req->cqe.res = -ETIME;
a90c8bf6 6565 req->io_task_work.func = io_req_task_complete;
4813c377 6566 io_req_task_work_add(req, false);
5262f567
JA
6567 return HRTIMER_NORESTART;
6568}
6569
fbd15848 6570static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
b21432b4 6571 struct io_cancel_data *cd)
89850fce 6572 __must_hold(&ctx->timeout_lock)
f254ac04 6573{
fbd15848 6574 struct io_timeout_data *io;
47f46768 6575 struct io_kiocb *req;
fd9c7bc5 6576 bool found = false;
f254ac04 6577
135fcde8 6578 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
970f256e
JA
6579 if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
6580 cd->data != req->cqe.user_data)
8e29da69 6581 continue;
970f256e 6582 if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
8e29da69
JA
6583 if (cd->seq == req->work.cancel_seq)
6584 continue;
6585 req->work.cancel_seq = cd->seq;
6586 }
6587 found = true;
6588 break;
47f46768 6589 }
fd9c7bc5
PB
6590 if (!found)
6591 return ERR_PTR(-ENOENT);
fbd15848
PB
6592
6593 io = req->async_data;
fd9c7bc5 6594 if (hrtimer_try_to_cancel(&io->timer) == -1)
fbd15848 6595 return ERR_PTR(-EALREADY);
a71976f3 6596 list_del_init(&req->timeout.list);
fbd15848
PB
6597 return req;
6598}
47f46768 6599
b21432b4 6600static int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
ec3c3d0f 6601 __must_hold(&ctx->completion_lock)
fbd15848 6602{
3645c200
PB
6603 struct io_kiocb *req;
6604
6605 spin_lock_irq(&ctx->timeout_lock);
b21432b4 6606 req = io_timeout_extract(ctx, cd);
3645c200 6607 spin_unlock_irq(&ctx->timeout_lock);
fbd15848
PB
6608
6609 if (IS_ERR(req))
6610 return PTR_ERR(req);
6695490d 6611 io_req_task_queue_fail(req, -ECANCELED);
f254ac04
JA
6612 return 0;
6613}
6614
50c1df2b
JA
6615static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
6616{
6617 switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
6618 case IORING_TIMEOUT_BOOTTIME:
6619 return CLOCK_BOOTTIME;
6620 case IORING_TIMEOUT_REALTIME:
6621 return CLOCK_REALTIME;
6622 default:
6623 /* can't happen, vetted at prep time */
6624 WARN_ON_ONCE(1);
6625 fallthrough;
6626 case 0:
6627 return CLOCK_MONOTONIC;
6628 }
6629}
6630
f1042b6c
PB
6631static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
6632 struct timespec64 *ts, enum hrtimer_mode mode)
6633 __must_hold(&ctx->timeout_lock)
6634{
6635 struct io_timeout_data *io;
6636 struct io_kiocb *req;
6637 bool found = false;
6638
6639 list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) {
cef216fc 6640 found = user_data == req->cqe.user_data;
f1042b6c
PB
6641 if (found)
6642 break;
6643 }
6644 if (!found)
6645 return -ENOENT;
6646
6647 io = req->async_data;
6648 if (hrtimer_try_to_cancel(&io->timer) == -1)
6649 return -EALREADY;
6650 hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
6651 io->timer.function = io_link_timeout_fn;
6652 hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
6653 return 0;
6654}
6655
9c8e11b3
PB
6656static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
6657 struct timespec64 *ts, enum hrtimer_mode mode)
89850fce 6658 __must_hold(&ctx->timeout_lock)
47f46768 6659{
b21432b4
JA
6660 struct io_cancel_data cd = { .data = user_data, };
6661 struct io_kiocb *req = io_timeout_extract(ctx, &cd);
9c8e11b3 6662 struct io_timeout_data *data;
47f46768 6663
9c8e11b3
PB
6664 if (IS_ERR(req))
6665 return PTR_ERR(req);
47f46768 6666
9c8e11b3
PB
6667 req->timeout.off = 0; /* noseq */
6668 data = req->async_data;
6669 list_add_tail(&req->timeout.list, &ctx->timeout_list);
50c1df2b 6670 hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
9c8e11b3
PB
6671 data->timer.function = io_timeout_fn;
6672 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
6673 return 0;
47f46768
JA
6674}
6675
3529d8c2
JA
6676static int io_timeout_remove_prep(struct io_kiocb *req,
6677 const struct io_uring_sqe *sqe)
b29472ee 6678{
9c8e11b3
PB
6679 struct io_timeout_rem *tr = &req->timeout_rem;
6680
b29472ee
JA
6681 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6682 return -EINVAL;
61710e43
DA
6683 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6684 return -EINVAL;
26578cda 6685 if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
b29472ee
JA
6686 return -EINVAL;
6687
f1042b6c 6688 tr->ltimeout = false;
9c8e11b3
PB
6689 tr->addr = READ_ONCE(sqe->addr);
6690 tr->flags = READ_ONCE(sqe->timeout_flags);
f1042b6c
PB
6691 if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
6692 if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
6693 return -EINVAL;
6694 if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
6695 tr->ltimeout = true;
6696 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
9c8e11b3
PB
6697 return -EINVAL;
6698 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
6699 return -EFAULT;
2087009c
YB
6700 if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
6701 return -EINVAL;
9c8e11b3
PB
6702 } else if (tr->flags) {
6703 /* timeout removal doesn't support flags */
b29472ee 6704 return -EINVAL;
9c8e11b3 6705 }
b29472ee 6706
b29472ee
JA
6707 return 0;
6708}
6709
8662daec
PB
6710static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
6711{
6712 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
6713 : HRTIMER_MODE_REL;
6714}
6715
11365043
JA
6716/*
6717 * Remove or update an existing timeout command
6718 */
61e98203 6719static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
11365043 6720{
9c8e11b3 6721 struct io_timeout_rem *tr = &req->timeout_rem;
11365043 6722 struct io_ring_ctx *ctx = req->ctx;
47f46768 6723 int ret;
11365043 6724
ec3c3d0f 6725 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
b21432b4
JA
6726 struct io_cancel_data cd = { .data = tr->addr, };
6727
ec3c3d0f 6728 spin_lock(&ctx->completion_lock);
b21432b4 6729 ret = io_timeout_cancel(ctx, &cd);
ec3c3d0f
PB
6730 spin_unlock(&ctx->completion_lock);
6731 } else {
f1042b6c
PB
6732 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
6733
ec3c3d0f 6734 spin_lock_irq(&ctx->timeout_lock);
f1042b6c
PB
6735 if (tr->ltimeout)
6736 ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
6737 else
6738 ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
ec3c3d0f
PB
6739 spin_unlock_irq(&ctx->timeout_lock);
6740 }
11365043 6741
4e88d6e7 6742 if (ret < 0)
93d2bcd2 6743 req_set_fail(req);
505657bc 6744 io_req_complete_post(req, ret, 0);
11365043 6745 return 0;
5262f567
JA
6746}
6747
3529d8c2 6748static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2d28390a 6749 bool is_timeout_link)
5262f567 6750{
ad8a48ac 6751 struct io_timeout_data *data;
a41525ab 6752 unsigned flags;
56080b02 6753 u32 off = READ_ONCE(sqe->off);
5262f567 6754
ad8a48ac 6755 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5262f567 6756 return -EINVAL;
26578cda
PB
6757 if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
6758 sqe->splice_fd_in)
a41525ab 6759 return -EINVAL;
56080b02 6760 if (off && is_timeout_link)
2d28390a 6761 return -EINVAL;
a41525ab 6762 flags = READ_ONCE(sqe->timeout_flags);
6224590d
PB
6763 if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
6764 IORING_TIMEOUT_ETIME_SUCCESS))
50c1df2b
JA
6765 return -EINVAL;
6766 /* more than one clock specified is invalid, obviously */
6767 if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
5262f567 6768 return -EINVAL;
bdf20073 6769
ef9dd637 6770 INIT_LIST_HEAD(&req->timeout.list);
bfe68a22 6771 req->timeout.off = off;
f18ee4cf
PB
6772 if (unlikely(off && !req->ctx->off_timeout_used))
6773 req->ctx->off_timeout_used = true;
26a61679 6774
d6a644a7
PB
6775 if (WARN_ON_ONCE(req_has_async_data(req)))
6776 return -EFAULT;
6777 if (io_alloc_async_data(req))
26a61679
JA
6778 return -ENOMEM;
6779
e8c2bc1f 6780 data = req->async_data;
ad8a48ac 6781 data->req = req;
50c1df2b 6782 data->flags = flags;
ad8a48ac
JA
6783
6784 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
6785 return -EFAULT;
6786
f6223ff7
YB
6787 if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
6788 return -EINVAL;
6789
e677edbc 6790 INIT_LIST_HEAD(&req->timeout.list);
8662daec 6791 data->mode = io_translate_timeout_mode(flags);
50c1df2b 6792 hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
b97e736a
PB
6793
6794 if (is_timeout_link) {
6795 struct io_submit_link *link = &req->ctx->submit_state.link;
6796
6797 if (!link->head)
6798 return -EINVAL;
6799 if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
6800 return -EINVAL;
4d13d1a4
PB
6801 req->timeout.head = link->last;
6802 link->last->flags |= REQ_F_ARM_LTIMEOUT;
b97e736a 6803 }
ad8a48ac
JA
6804 return 0;
6805}
6806
61e98203 6807static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
ad8a48ac 6808{
ad8a48ac 6809 struct io_ring_ctx *ctx = req->ctx;
e8c2bc1f 6810 struct io_timeout_data *data = req->async_data;
ad8a48ac 6811 struct list_head *entry;
bfe68a22 6812 u32 tail, off = req->timeout.off;
ad8a48ac 6813
89850fce 6814 spin_lock_irq(&ctx->timeout_lock);
93bd25bb 6815
5262f567
JA
6816 /*
6817 * sqe->off holds how many events need to occur for this
93bd25bb
JA
6818 * timeout event to be satisfied. If it isn't set, then this is
6819 * a pure timeout request, sequence isn't used.
5262f567 6820 */
8eb7e2d0 6821 if (io_is_timeout_noseq(req)) {
93bd25bb
JA
6822 entry = ctx->timeout_list.prev;
6823 goto add;
6824 }
5262f567 6825
bfe68a22
PB
6826 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
6827 req->timeout.target_seq = tail + off;
5262f567 6828
f010505b
MDG
6829 /* Update the last seq here in case io_flush_timeouts() hasn't.
6830 * This is safe because ->timeout_lock is held, and io_flush_timeouts()
6831 * updates ->cq_last_tm_flush under the same lock.
6832 */
6833 ctx->cq_last_tm_flush = tail;
6834
5262f567
JA
6835 /*
6836 * Insertion sort, ensuring the first entry in the list is always
6837 * the one we need first.
6838 */
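 /*
 * The comparison in the loop works on u32 deltas: target_seq - tail is
 * how many more completions a pending timeout still needs, so the
 * ordering stays correct even when the sequence counters wrap.
 */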
5262f567 6839 list_for_each_prev(entry, &ctx->timeout_list) {
135fcde8
PB
6840 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
6841 timeout.list);
5262f567 6842
8eb7e2d0 6843 if (io_is_timeout_noseq(nxt))
93bd25bb 6844 continue;
bfe68a22
PB
6845 /* nxt.seq is behind @tail, otherwise would've been completed */
6846 if (off >= nxt->timeout.target_seq - tail)
5262f567
JA
6847 break;
6848 }
93bd25bb 6849add:
135fcde8 6850 list_add(&req->timeout.list, entry);
ad8a48ac
JA
6851 data->timer.function = io_timeout_fn;
6852 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
89850fce 6853 spin_unlock_irq(&ctx->timeout_lock);
5262f567
JA
6854 return 0;
6855}
5262f567 6856
62755e35
JA
6857static bool io_cancel_cb(struct io_wq_work *work, void *data)
6858{
6859 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
f458dd84 6860 struct io_cancel_data *cd = data;
62755e35 6861
8e29da69
JA
6862 if (req->ctx != cd->ctx)
6863 return false;
970f256e
JA
6864 if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
6865 ;
6866 } else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
4bf94615
JA
6867 if (req->file != cd->file)
6868 return false;
6869 } else {
6870 if (req->cqe.user_data != cd->data)
6871 return false;
6872 }
970f256e 6873 if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
8e29da69
JA
6874 if (cd->seq == req->work.cancel_seq)
6875 return false;
6876 req->work.cancel_seq = cd->seq;
6877 }
6878 return true;
62755e35
JA
6879}
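
/*
 * io_cancel_cb() above implements the match rules for async cancel:
 * requests must belong to the same ring, and are then matched either
 * unconditionally (IORING_ASYNC_CANCEL_ANY), by file
 * (IORING_ASYNC_CANCEL_FD), or by user_data (the default). For the
 * "cancel all/any" variants, cancel_seq marks requests already seen so
 * the same request is not counted twice within one cancel operation.
 */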
6880
b21432b4
JA
6881static int io_async_cancel_one(struct io_uring_task *tctx,
6882 struct io_cancel_data *cd)
62755e35 6883{
62755e35 6884 enum io_wq_cancel cancel_ret;
62755e35 6885 int ret = 0;
970f256e 6886 bool all;
62755e35 6887
f458dd84 6888 if (!tctx || !tctx->io_wq)
5aa75ed5
JA
6889 return -ENOENT;
6890
970f256e
JA
6891 all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
6892 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
62755e35
JA
6893 switch (cancel_ret) {
6894 case IO_WQ_CANCEL_OK:
6895 ret = 0;
6896 break;
6897 case IO_WQ_CANCEL_RUNNING:
6898 ret = -EALREADY;
6899 break;
6900 case IO_WQ_CANCEL_NOTFOUND:
6901 ret = -ENOENT;
6902 break;
6903 }
6904
e977d6d3
JA
6905 return ret;
6906}
6907
b21432b4 6908static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
47f46768 6909{
8cb01fac 6910 struct io_ring_ctx *ctx = req->ctx;
47f46768
JA
6911 int ret;
6912
dadebc35 6913 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
8cb01fac 6914
b21432b4 6915 ret = io_async_cancel_one(req->task->io_uring, cd);
ccbf7261
JA
6916 /*
6917 * Fall through even for -EALREADY, as we may have a poll handler
6918 * armed that needs unarming.
6919 */
6920 if (!ret)
6921 return 0;
505657bc
PB
6922
6923 spin_lock(&ctx->completion_lock);
b21432b4 6924 ret = io_poll_cancel(ctx, cd);
ccbf7261
JA
6925 if (ret != -ENOENT)
6926 goto out;
4bf94615
JA
6927 if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
6928 ret = io_timeout_cancel(ctx, cd);
505657bc
PB
6929out:
6930 spin_unlock(&ctx->completion_lock);
6931 return ret;
47f46768
JA
6932}
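
/*
 * io_try_cancel() probes the possible homes of a request in order:
 * first the task's io-wq (which may return -EALREADY if the work is
 * already running), then, under ->completion_lock, armed poll requests
 * and finally pending timeouts (the timeout lookup is skipped for
 * FD-based cancels).
 */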
6933
970f256e
JA
6934#define CANCEL_FLAGS (IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
6935 IORING_ASYNC_CANCEL_ANY)
6936
3529d8c2
JA
6937static int io_async_cancel_prep(struct io_kiocb *req,
6938 const struct io_uring_sqe *sqe)
e977d6d3 6939{
fbf23849 6940 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
e977d6d3 6941 return -EINVAL;
4bf94615 6942 if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
61710e43 6943 return -EINVAL;
8e29da69 6944 if (sqe->ioprio || sqe->off || sqe->len || sqe->splice_fd_in)
e977d6d3
JA
6945 return -EINVAL;
6946
fbf23849 6947 req->cancel.addr = READ_ONCE(sqe->addr);
8e29da69 6948 req->cancel.flags = READ_ONCE(sqe->cancel_flags);
970f256e 6949 if (req->cancel.flags & ~CANCEL_FLAGS)
8e29da69 6950 return -EINVAL;
970f256e
JA
6951 if (req->cancel.flags & IORING_ASYNC_CANCEL_FD) {
6952 if (req->cancel.flags & IORING_ASYNC_CANCEL_ANY)
6953 return -EINVAL;
4bf94615 6954 req->cancel.fd = READ_ONCE(sqe->fd);
970f256e 6955 }
8e29da69 6956
fbf23849
JA
6957 return 0;
6958}
6959
8e29da69
JA
6960static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req,
6961 unsigned int issue_flags)
fbf23849 6962{
970f256e 6963 bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
8e29da69 6964 struct io_ring_ctx *ctx = cd->ctx;
58f99373 6965 struct io_tctx_node *node;
8e29da69 6966 int ret, nr = 0;
58f99373 6967
8e29da69
JA
6968 do {
6969 ret = io_try_cancel(req, cd);
6970 if (ret == -ENOENT)
6971 break;
970f256e 6972 if (!all)
8e29da69
JA
6973 return ret;
6974 nr++;
6975 } while (1);
58f99373
PB
6976
6977 /* slow path, try all io-wq's */
f8929630 6978 io_ring_submit_lock(ctx, issue_flags);
58f99373
PB
6979 ret = -ENOENT;
6980 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
6981 struct io_uring_task *tctx = node->task->io_uring;
fbf23849 6982
8e29da69
JA
6983 ret = io_async_cancel_one(tctx, cd);
6984 if (ret != -ENOENT) {
970f256e 6985 if (!all)
8e29da69
JA
6986 break;
6987 nr++;
6988 }
58f99373 6989 }
f8929630 6990 io_ring_submit_unlock(ctx, issue_flags);
970f256e 6991 return all ? nr : ret;
8e29da69
JA
6992}
6993
6994static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
6995{
6996 struct io_cancel_data cd = {
6997 .ctx = req->ctx,
6998 .data = req->cancel.addr,
6999 .flags = req->cancel.flags,
7000 .seq = atomic_inc_return(&req->ctx->cancel_seq),
7001 };
7002 int ret;
7003
4bf94615
JA
7004 if (cd.flags & IORING_ASYNC_CANCEL_FD) {
7005 if (req->flags & REQ_F_FIXED_FILE)
7006 req->file = io_file_get_fixed(req, req->cancel.fd,
7007 issue_flags);
7008 else
7009 req->file = io_file_get_normal(req, req->cancel.fd);
7010 if (!req->file) {
7011 ret = -EBADF;
7012 goto done;
7013 }
7014 cd.file = req->file;
7015 }
7016
8e29da69 7017 ret = __io_async_cancel(&cd, req, issue_flags);
4bf94615 7018done:
58f99373 7019 if (ret < 0)
93d2bcd2 7020 req_set_fail(req);
505657bc 7021 io_req_complete_post(req, ret, 0);
5262f567
JA
7022 return 0;
7023}
7024
269bbe5f 7025static int io_rsrc_update_prep(struct io_kiocb *req,
05f3fb3c
JA
7026 const struct io_uring_sqe *sqe)
7027{
61710e43
DA
7028 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
7029 return -EINVAL;
26578cda 7030 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
05f3fb3c
JA
7031 return -EINVAL;
7032
269bbe5f
BM
7033 req->rsrc_update.offset = READ_ONCE(sqe->off);
7034 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
7035 if (!req->rsrc_update.nr_args)
05f3fb3c 7036 return -EINVAL;
269bbe5f 7037 req->rsrc_update.arg = READ_ONCE(sqe->addr);
05f3fb3c
JA
7038 return 0;
7039}
7040
889fca73 7041static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
fbf23849
JA
7042{
7043 struct io_ring_ctx *ctx = req->ctx;
c3bdad02 7044 struct io_uring_rsrc_update2 up;
05f3fb3c 7045 int ret;
fbf23849 7046
269bbe5f
BM
7047 up.offset = req->rsrc_update.offset;
7048 up.data = req->rsrc_update.arg;
c3bdad02
PB
7049 up.nr = 0;
7050 up.tags = 0;
615cee49 7051 up.resv = 0;
d8a3ba9c 7052 up.resv2 = 0;
05f3fb3c 7053
f8929630 7054 io_ring_submit_lock(ctx, issue_flags);
fdecb662 7055 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
98f0b3b4 7056 &up, req->rsrc_update.nr_args);
f8929630 7057 io_ring_submit_unlock(ctx, issue_flags);
05f3fb3c
JA
7058
7059 if (ret < 0)
93d2bcd2 7060 req_set_fail(req);
889fca73 7061 __io_req_complete(req, issue_flags, ret, 0);
5262f567
JA
7062 return 0;
7063}
7064
bfe76559 7065static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1 7066{
d625c6ee 7067 switch (req->opcode) {
e781573e 7068 case IORING_OP_NOP:
bfe76559 7069 return 0;
f67676d1
JA
7070 case IORING_OP_READV:
7071 case IORING_OP_READ_FIXED:
3a6820f2 7072 case IORING_OP_READ:
f67676d1
JA
7073 case IORING_OP_WRITEV:
7074 case IORING_OP_WRITE_FIXED:
3a6820f2 7075 case IORING_OP_WRITE:
584b0180 7076 return io_prep_rw(req, sqe);
0969e783 7077 case IORING_OP_POLL_ADD:
bfe76559 7078 return io_poll_add_prep(req, sqe);
0969e783 7079 case IORING_OP_POLL_REMOVE:
c5de0036 7080 return io_poll_update_prep(req, sqe);
8ed8d3c3 7081 case IORING_OP_FSYNC:
1155c76a 7082 return io_fsync_prep(req, sqe);
8ed8d3c3 7083 case IORING_OP_SYNC_FILE_RANGE:
1155c76a 7084 return io_sfr_prep(req, sqe);
03b1230c 7085 case IORING_OP_SENDMSG:
fddaface 7086 case IORING_OP_SEND:
bfe76559 7087 return io_sendmsg_prep(req, sqe);
03b1230c 7088 case IORING_OP_RECVMSG:
fddaface 7089 case IORING_OP_RECV:
bfe76559 7090 return io_recvmsg_prep(req, sqe);
f499a021 7091 case IORING_OP_CONNECT:
bfe76559 7092 return io_connect_prep(req, sqe);
2d28390a 7093 case IORING_OP_TIMEOUT:
bfe76559 7094 return io_timeout_prep(req, sqe, false);
b29472ee 7095 case IORING_OP_TIMEOUT_REMOVE:
bfe76559 7096 return io_timeout_remove_prep(req, sqe);
fbf23849 7097 case IORING_OP_ASYNC_CANCEL:
bfe76559 7098 return io_async_cancel_prep(req, sqe);
2d28390a 7099 case IORING_OP_LINK_TIMEOUT:
bfe76559 7100 return io_timeout_prep(req, sqe, true);
8ed8d3c3 7101 case IORING_OP_ACCEPT:
bfe76559 7102 return io_accept_prep(req, sqe);
d63d1b5e 7103 case IORING_OP_FALLOCATE:
bfe76559 7104 return io_fallocate_prep(req, sqe);
15b71abe 7105 case IORING_OP_OPENAT:
bfe76559 7106 return io_openat_prep(req, sqe);
b5dba59e 7107 case IORING_OP_CLOSE:
bfe76559 7108 return io_close_prep(req, sqe);
05f3fb3c 7109 case IORING_OP_FILES_UPDATE:
269bbe5f 7110 return io_rsrc_update_prep(req, sqe);
eddc7ef5 7111 case IORING_OP_STATX:
bfe76559 7112 return io_statx_prep(req, sqe);
4840e418 7113 case IORING_OP_FADVISE:
bfe76559 7114 return io_fadvise_prep(req, sqe);
c1ca757b 7115 case IORING_OP_MADVISE:
bfe76559 7116 return io_madvise_prep(req, sqe);
cebdb986 7117 case IORING_OP_OPENAT2:
bfe76559 7118 return io_openat2_prep(req, sqe);
3e4827b0 7119 case IORING_OP_EPOLL_CTL:
bfe76559 7120 return io_epoll_ctl_prep(req, sqe);
7d67af2c 7121 case IORING_OP_SPLICE:
bfe76559 7122 return io_splice_prep(req, sqe);
ddf0322d 7123 case IORING_OP_PROVIDE_BUFFERS:
bfe76559 7124 return io_provide_buffers_prep(req, sqe);
067524e9 7125 case IORING_OP_REMOVE_BUFFERS:
bfe76559 7126 return io_remove_buffers_prep(req, sqe);
f2a8d5c7 7127 case IORING_OP_TEE:
bfe76559 7128 return io_tee_prep(req, sqe);
36f4fa68
JA
7129 case IORING_OP_SHUTDOWN:
7130 return io_shutdown_prep(req, sqe);
80a261fd
JA
7131 case IORING_OP_RENAMEAT:
7132 return io_renameat_prep(req, sqe);
14a1143b
JA
7133 case IORING_OP_UNLINKAT:
7134 return io_unlinkat_prep(req, sqe);
e34a02dc
DK
7135 case IORING_OP_MKDIRAT:
7136 return io_mkdirat_prep(req, sqe);
7a8721f8
DK
7137 case IORING_OP_SYMLINKAT:
7138 return io_symlinkat_prep(req, sqe);
cf30da90
DK
7139 case IORING_OP_LINKAT:
7140 return io_linkat_prep(req, sqe);
4f57f06c
JA
7141 case IORING_OP_MSG_RING:
7142 return io_msg_ring_prep(req, sqe);
f67676d1
JA
7143 }
7144
bfe76559
PB
7145 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
7146 req->opcode);
bd54b6fe 7147 return -EINVAL;
bfe76559
PB
7148}
7149
93642ef8 7150static int io_req_prep_async(struct io_kiocb *req)
bfe76559 7151{
b7e298d2
PB
7152 if (!io_op_defs[req->opcode].needs_async_setup)
7153 return 0;
d886e185 7154 if (WARN_ON_ONCE(req_has_async_data(req)))
b7e298d2
PB
7155 return -EFAULT;
7156 if (io_alloc_async_data(req))
7157 return -EAGAIN;
7158
93642ef8
PB
7159 switch (req->opcode) {
7160 case IORING_OP_READV:
93642ef8
PB
7161 return io_rw_prep_async(req, READ);
7162 case IORING_OP_WRITEV:
93642ef8
PB
7163 return io_rw_prep_async(req, WRITE);
7164 case IORING_OP_SENDMSG:
93642ef8
PB
7165 return io_sendmsg_prep_async(req);
7166 case IORING_OP_RECVMSG:
93642ef8
PB
7167 return io_recvmsg_prep_async(req);
7168 case IORING_OP_CONNECT:
7169 return io_connect_prep_async(req);
7170 }
b7e298d2
PB
7171 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
7172 req->opcode);
7173 return -EFAULT;
f67676d1
JA
7174}
7175
9cf7c104
PB
7176static u32 io_get_sequence(struct io_kiocb *req)
7177{
a3dbdf54 7178 u32 seq = req->ctx->cached_sq_head;
963c6abb 7179 struct io_kiocb *cur;
9cf7c104 7180
a3dbdf54 7181 /* need original cached_sq_head, but it was increased for each req */
963c6abb 7182 io_for_each_link(cur, req)
a3dbdf54
PB
7183 seq--;
7184 return seq;
9cf7c104
PB
7185}
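
/*
 * The sequence computed above is the value ->cached_sq_head had before
 * the first request of this link was picked off the SQ ring: every
 * request in the link bumped cached_sq_head once at submission time, so
 * one is subtracted per linked request. io_drain_req() feeds it to
 * req_need_defer() to decide whether everything submitted earlier has
 * completed and the drained request may run.
 */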
7186
c072481d 7187static __cold void io_drain_req(struct io_kiocb *req)
de0617e4 7188{
a197f664 7189 struct io_ring_ctx *ctx = req->ctx;
27dc8338 7190 struct io_defer_entry *de;
f67676d1 7191 int ret;
e0eb71dc 7192 u32 seq = io_get_sequence(req);
3c19966d 7193
9d858b21 7194 /* Still need defer if there is pending req in defer list. */
e302f104 7195 spin_lock(&ctx->completion_lock);
5e371265 7196 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
e302f104 7197 spin_unlock(&ctx->completion_lock);
e0eb71dc 7198queue:
10c66904 7199 ctx->drain_active = false;
e0eb71dc
PB
7200 io_req_task_queue(req);
7201 return;
10c66904 7202 }
e302f104 7203 spin_unlock(&ctx->completion_lock);
9cf7c104 7204
b7e298d2 7205 ret = io_req_prep_async(req);
e0eb71dc
PB
7206 if (ret) {
7207fail:
7208 io_req_complete_failed(req, ret);
7209 return;
7210 }
cbdcb435 7211 io_prep_async_link(req);
27dc8338 7212 de = kmalloc(sizeof(*de), GFP_KERNEL);
76cc33d7 7213 if (!de) {
1b48773f 7214 ret = -ENOMEM;
e0eb71dc 7215 goto fail;
76cc33d7 7216 }
2d28390a 7217
79ebeaee 7218 spin_lock(&ctx->completion_lock);
9cf7c104 7219 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
79ebeaee 7220 spin_unlock(&ctx->completion_lock);
27dc8338 7221 kfree(de);
e0eb71dc 7222 goto queue;
de0617e4
JA
7223 }
7224
cef216fc 7225 trace_io_uring_defer(ctx, req, req->cqe.user_data, req->opcode);
27dc8338 7226 de->req = req;
9cf7c104 7227 de->seq = seq;
27dc8338 7228 list_add_tail(&de->list, &ctx->defer_list);
79ebeaee 7229 spin_unlock(&ctx->completion_lock);
de0617e4
JA
7230}
7231
68fb8979 7232static void io_clean_op(struct io_kiocb *req)
99bc4c38 7233{
8197b053
PB
7234 if (req->flags & REQ_F_BUFFER_SELECTED) {
7235 spin_lock(&req->ctx->completion_lock);
cc3cec83 7236 io_put_kbuf_comp(req);
8197b053
PB
7237 spin_unlock(&req->ctx->completion_lock);
7238 }
99bc4c38 7239
0e1b6fe3
PB
7240 if (req->flags & REQ_F_NEED_CLEANUP) {
7241 switch (req->opcode) {
7242 case IORING_OP_READV:
7243 case IORING_OP_READ_FIXED:
7244 case IORING_OP_READ:
7245 case IORING_OP_WRITEV:
7246 case IORING_OP_WRITE_FIXED:
e8c2bc1f
JA
7247 case IORING_OP_WRITE: {
7248 struct io_async_rw *io = req->async_data;
1dacb4df
PB
7249
7250 kfree(io->free_iovec);
0e1b6fe3 7251 break;
e8c2bc1f 7252 }
0e1b6fe3 7253 case IORING_OP_RECVMSG:
e8c2bc1f
JA
7254 case IORING_OP_SENDMSG: {
7255 struct io_async_msghdr *io = req->async_data;
257e84a5
PB
7256
7257 kfree(io->free_iov);
0e1b6fe3 7258 break;
e8c2bc1f 7259 }
f3cd4850
JA
7260 case IORING_OP_OPENAT:
7261 case IORING_OP_OPENAT2:
7262 if (req->open.filename)
7263 putname(req->open.filename);
7264 break;
80a261fd
JA
7265 case IORING_OP_RENAMEAT:
7266 putname(req->rename.oldpath);
7267 putname(req->rename.newpath);
7268 break;
14a1143b
JA
7269 case IORING_OP_UNLINKAT:
7270 putname(req->unlink.filename);
7271 break;
e34a02dc
DK
7272 case IORING_OP_MKDIRAT:
7273 putname(req->mkdir.filename);
7274 break;
7a8721f8
DK
7275 case IORING_OP_SYMLINKAT:
7276 putname(req->symlink.oldpath);
7277 putname(req->symlink.newpath);
7278 break;
cf30da90
DK
7279 case IORING_OP_LINKAT:
7280 putname(req->hardlink.oldpath);
7281 putname(req->hardlink.newpath);
7282 break;
1b6fe6e0
SR
7283 case IORING_OP_STATX:
7284 if (req->statx.filename)
7285 putname(req->statx.filename);
7286 break;
0e1b6fe3 7287 }
99bc4c38 7288 }
75652a30
JA
7289 if ((req->flags & REQ_F_POLLED) && req->apoll) {
7290 kfree(req->apoll->double_poll);
7291 kfree(req->apoll);
7292 req->apoll = NULL;
7293 }
c854357b 7294 if (req->flags & REQ_F_CREDS)
b8e64b53 7295 put_cred(req->creds);
d886e185
PB
7296 if (req->flags & REQ_F_ASYNC_DATA) {
7297 kfree(req->async_data);
7298 req->async_data = NULL;
7299 }
c854357b 7300 req->flags &= ~IO_REQ_CLEAN_FLAGS;
99bc4c38
PB
7301}
7302
6bf9c47a
JA
7303static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
7304{
7305 if (req->file || !io_op_defs[req->opcode].needs_file)
7306 return true;
7307
7308 if (req->flags & REQ_F_FIXED_FILE)
cef216fc 7309 req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
6bf9c47a 7310 else
cef216fc 7311 req->file = io_file_get_normal(req, req->cqe.fd);
6bf9c47a 7312
772f5e00 7313 return !!req->file;
6bf9c47a
JA
7314}
7315
889fca73 7316static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1 7317{
5730b27e 7318 const struct cred *creds = NULL;
d625c6ee 7319 int ret;
2b188cc1 7320
70152140
JA
7321 if (unlikely(!io_assign_file(req, issue_flags)))
7322 return -EBADF;
7323
6878b40e 7324 if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
c10d1f98 7325 creds = override_creds(req->creds);
5730b27e 7326
5bd2182d
PM
7327 if (!io_op_defs[req->opcode].audit_skip)
7328 audit_uring_entry(req->opcode);
7329
d625c6ee 7330 switch (req->opcode) {
2b188cc1 7331 case IORING_OP_NOP:
889fca73 7332 ret = io_nop(req, issue_flags);
2b188cc1
JA
7333 break;
7334 case IORING_OP_READV:
edafccee 7335 case IORING_OP_READ_FIXED:
3a6820f2 7336 case IORING_OP_READ:
889fca73 7337 ret = io_read(req, issue_flags);
edafccee 7338 break;
3529d8c2 7339 case IORING_OP_WRITEV:
edafccee 7340 case IORING_OP_WRITE_FIXED:
3a6820f2 7341 case IORING_OP_WRITE:
889fca73 7342 ret = io_write(req, issue_flags);
2b188cc1 7343 break;
c992fe29 7344 case IORING_OP_FSYNC:
45d189c6 7345 ret = io_fsync(req, issue_flags);
c992fe29 7346 break;
221c5eb2 7347 case IORING_OP_POLL_ADD:
61e98203 7348 ret = io_poll_add(req, issue_flags);
221c5eb2
JA
7349 break;
7350 case IORING_OP_POLL_REMOVE:
c5de0036 7351 ret = io_poll_update(req, issue_flags);
221c5eb2 7352 break;
5d17b4a4 7353 case IORING_OP_SYNC_FILE_RANGE:
45d189c6 7354 ret = io_sync_file_range(req, issue_flags);
5d17b4a4 7355 break;
0fa03c62 7356 case IORING_OP_SENDMSG:
889fca73 7357 ret = io_sendmsg(req, issue_flags);
062d04d7 7358 break;
fddaface 7359 case IORING_OP_SEND:
889fca73 7360 ret = io_send(req, issue_flags);
0fa03c62 7361 break;
aa1fa28f 7362 case IORING_OP_RECVMSG:
889fca73 7363 ret = io_recvmsg(req, issue_flags);
062d04d7 7364 break;
fddaface 7365 case IORING_OP_RECV:
889fca73 7366 ret = io_recv(req, issue_flags);
aa1fa28f 7367 break;
5262f567 7368 case IORING_OP_TIMEOUT:
61e98203 7369 ret = io_timeout(req, issue_flags);
5262f567 7370 break;
11365043 7371 case IORING_OP_TIMEOUT_REMOVE:
61e98203 7372 ret = io_timeout_remove(req, issue_flags);
11365043 7373 break;
17f2fe35 7374 case IORING_OP_ACCEPT:
889fca73 7375 ret = io_accept(req, issue_flags);
17f2fe35 7376 break;
f8e85cf2 7377 case IORING_OP_CONNECT:
889fca73 7378 ret = io_connect(req, issue_flags);
f8e85cf2 7379 break;
62755e35 7380 case IORING_OP_ASYNC_CANCEL:
61e98203 7381 ret = io_async_cancel(req, issue_flags);
62755e35 7382 break;
d63d1b5e 7383 case IORING_OP_FALLOCATE:
45d189c6 7384 ret = io_fallocate(req, issue_flags);
d63d1b5e 7385 break;
15b71abe 7386 case IORING_OP_OPENAT:
45d189c6 7387 ret = io_openat(req, issue_flags);
15b71abe 7388 break;
b5dba59e 7389 case IORING_OP_CLOSE:
889fca73 7390 ret = io_close(req, issue_flags);
b5dba59e 7391 break;
05f3fb3c 7392 case IORING_OP_FILES_UPDATE:
889fca73 7393 ret = io_files_update(req, issue_flags);
05f3fb3c 7394 break;
eddc7ef5 7395 case IORING_OP_STATX:
45d189c6 7396 ret = io_statx(req, issue_flags);
eddc7ef5 7397 break;
4840e418 7398 case IORING_OP_FADVISE:
45d189c6 7399 ret = io_fadvise(req, issue_flags);
4840e418 7400 break;
c1ca757b 7401 case IORING_OP_MADVISE:
45d189c6 7402 ret = io_madvise(req, issue_flags);
c1ca757b 7403 break;
cebdb986 7404 case IORING_OP_OPENAT2:
45d189c6 7405 ret = io_openat2(req, issue_flags);
cebdb986 7406 break;
3e4827b0 7407 case IORING_OP_EPOLL_CTL:
889fca73 7408 ret = io_epoll_ctl(req, issue_flags);
3e4827b0 7409 break;
7d67af2c 7410 case IORING_OP_SPLICE:
45d189c6 7411 ret = io_splice(req, issue_flags);
7d67af2c 7412 break;
ddf0322d 7413 case IORING_OP_PROVIDE_BUFFERS:
889fca73 7414 ret = io_provide_buffers(req, issue_flags);
ddf0322d 7415 break;
067524e9 7416 case IORING_OP_REMOVE_BUFFERS:
889fca73 7417 ret = io_remove_buffers(req, issue_flags);
3e4827b0 7418 break;
f2a8d5c7 7419 case IORING_OP_TEE:
45d189c6 7420 ret = io_tee(req, issue_flags);
f2a8d5c7 7421 break;
36f4fa68 7422 case IORING_OP_SHUTDOWN:
45d189c6 7423 ret = io_shutdown(req, issue_flags);
36f4fa68 7424 break;
80a261fd 7425 case IORING_OP_RENAMEAT:
45d189c6 7426 ret = io_renameat(req, issue_flags);
80a261fd 7427 break;
14a1143b 7428 case IORING_OP_UNLINKAT:
45d189c6 7429 ret = io_unlinkat(req, issue_flags);
14a1143b 7430 break;
e34a02dc
DK
7431 case IORING_OP_MKDIRAT:
7432 ret = io_mkdirat(req, issue_flags);
7433 break;
7a8721f8
DK
7434 case IORING_OP_SYMLINKAT:
7435 ret = io_symlinkat(req, issue_flags);
7436 break;
cf30da90
DK
7437 case IORING_OP_LINKAT:
7438 ret = io_linkat(req, issue_flags);
7439 break;
4f57f06c
JA
7440 case IORING_OP_MSG_RING:
7441 ret = io_msg_ring(req, issue_flags);
7442 break;
2b188cc1
JA
7443 default:
7444 ret = -EINVAL;
7445 break;
7446 }
7447
5bd2182d
PM
7448 if (!io_op_defs[req->opcode].audit_skip)
7449 audit_uring_exit(!ret, ret);
7450
5730b27e
JA
7451 if (creds)
7452 revert_creds(creds);
def596e9
JA
7453 if (ret)
7454 return ret;
b532576e 7455 /* If the op doesn't have a file, we're not polling for it */
9983028e 7456 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file)
9882131c 7457 io_iopoll_req_issued(req, issue_flags);
def596e9
JA
7458
7459 return 0;
2b188cc1
JA
7460}
7461
ebc11b6c
PB
7462static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
7463{
7464 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7465
7466 req = io_put_req_find_next(req);
7467 return req ? &req->work : NULL;
7468}
7469
5280f7e5 7470static void io_wq_submit_work(struct io_wq_work *work)
2b188cc1
JA
7471{
7472 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6bf9c47a 7473 const struct io_op_def *def = &io_op_defs[req->opcode];
d01905db
PB
7474 unsigned int issue_flags = IO_URING_F_UNLOCKED;
7475 bool needs_poll = false;
6bf9c47a 7476 int ret = 0, err = -ECANCELED;
2b188cc1 7477
48dcd38d
PB
7478 /* one will be dropped by ->io_free_work() after returning to io-wq */
7479 if (!(req->flags & REQ_F_REFCOUNT))
7480 __io_req_set_refcount(req, 2);
7481 else
7482 req_ref_get(req);
5d5901a3 7483
cb2d344c 7484 io_arm_ltimeout(req);
6bf9c47a 7485
dadebc35 7486 /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
d01905db 7487 if (work->flags & IO_WQ_WORK_CANCEL) {
0f8da75b 7488fail:
6bf9c47a 7489 io_req_task_queue_fail(req, err);
d01905db
PB
7490 return;
7491 }
0f8da75b
PB
7492 if (!io_assign_file(req, issue_flags)) {
7493 err = -EBADF;
7494 work->flags |= IO_WQ_WORK_CANCEL;
7495 goto fail;
7496 }
31b51510 7497
d01905db 7498 if (req->flags & REQ_F_FORCE_ASYNC) {
afb7f56f
PB
7499 bool opcode_poll = def->pollin || def->pollout;
7500
7501 if (opcode_poll && file_can_poll(req->file)) {
7502 needs_poll = true;
d01905db 7503 issue_flags |= IO_URING_F_NONBLOCK;
afb7f56f 7504 }
561fb04a 7505 }
31b51510 7506
d01905db
PB
7507 do {
7508 ret = io_issue_sqe(req, issue_flags);
7509 if (ret != -EAGAIN)
7510 break;
7511 /*
7512 * We can get EAGAIN for iopolled IO even though we're
7513 * forcing a sync submission from here, since we can't
7514 * wait for request slots on the block side.
7515 */
7516 if (!needs_poll) {
7517 cond_resched();
7518 continue;
90fa0288
HX
7519 }
7520
4d9237e3 7521 if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
d01905db
PB
7522 return;
7523 /* aborted or ready, in either case retry blocking */
7524 needs_poll = false;
7525 issue_flags &= ~IO_URING_F_NONBLOCK;
7526 } while (1);
31b51510 7527
a3df7698 7528 /* avoid locking problems by failing it from a clean context */
5d5901a3 7529 if (ret)
a3df7698 7530 io_req_task_queue_fail(req, ret);
2b188cc1
JA
7531}
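
/*
 * The loop above is the io-wq issue path: a request is retried on
 * -EAGAIN, normally by rescheduling and issuing again in blocking mode.
 * For REQ_F_FORCE_ASYNC requests on pollable files it first attempts a
 * nonblocking issue and, on -EAGAIN, tries to arm poll via
 * io_arm_poll_handler() so the worker does not have to block; only if
 * poll arming fails does it fall back to blocking retries.
 */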
7532
aeca241b 7533static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
042b0d85 7534 unsigned i)
65e19f54 7535{
042b0d85 7536 return &table->files[i];
dafecf19
PB
7537}
7538
65e19f54
JA
7539static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
7540 int index)
7541{
aeca241b 7542 struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
65e19f54 7543
a04b0ac0 7544 return (struct file *) (slot->file_ptr & FFS_MASK);
65e19f54
JA
7545}
7546
a04b0ac0 7547static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
9a321c98
PB
7548{
7549 unsigned long file_ptr = (unsigned long) file;
7550
88459b50 7551 file_ptr |= io_file_get_flags(file);
a04b0ac0 7552 file_slot->file_ptr = file_ptr;
65e19f54
JA
7553}
7554
5106dd6e
JA
7555static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
7556 unsigned int issue_flags)
09bb8394 7557{
5106dd6e
JA
7558 struct io_ring_ctx *ctx = req->ctx;
7559 struct file *file = NULL;
ac177053 7560 unsigned long file_ptr;
09bb8394 7561
93f052cb 7562 io_ring_submit_lock(ctx, issue_flags);
5106dd6e 7563
ac177053 7564 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
5106dd6e 7565 goto out;
ac177053
PB
7566 fd = array_index_nospec(fd, ctx->nr_user_files);
7567 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
7568 file = (struct file *) (file_ptr & FFS_MASK);
7569 file_ptr &= ~FFS_MASK;
7570 /* mask in overlapping REQ_F and FFS bits */
35645ac3 7571 req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
5106dd6e
JA
7572 io_req_set_rsrc_node(req, ctx, 0);
7573out:
93f052cb 7574 io_ring_submit_unlock(ctx, issue_flags);
ac177053
PB
7575 return file;
7576}
d44f554e 7577
d5361233
JA
7578/*
7579 * Drop the file for requeue operations. Only used if req->file is the
7580 * io_uring descriptor itself.
7581 */
7582static void io_drop_inflight_file(struct io_kiocb *req)
7583{
7584 if (unlikely(req->flags & REQ_F_INFLIGHT)) {
7585 fput(req->file);
7586 req->file = NULL;
7587 req->flags &= ~REQ_F_INFLIGHT;
7588 }
7589}
7590
5106dd6e 7591static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
ac177053 7592{
62906e89 7593 struct file *file = fget(fd);
ac177053 7594
cef216fc 7595 trace_io_uring_file_get(req->ctx, req, req->cqe.user_data, fd);
09bb8394 7596
ac177053 7597 /* we don't allow fixed io_uring files */
d5361233
JA
7598 if (file && file->f_op == &io_uring_fops)
7599 req->flags |= REQ_F_INFLIGHT;
8371adf5 7600 return file;
09bb8394
JA
7601}
7602
f237c30a 7603static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
89b263f6
JA
7604{
7605 struct io_kiocb *prev = req->timeout.prev;
617a8948 7606 int ret = -ENOENT;
89b263f6
JA
7607
7608 if (prev) {
b21432b4
JA
7609 if (!(req->task->flags & PF_EXITING)) {
7610 struct io_cancel_data cd = {
7611 .ctx = req->ctx,
7612 .data = prev->cqe.user_data,
7613 };
7614
7615 ret = io_try_cancel(req, &cd);
7616 }
505657bc 7617 io_req_complete_post(req, ret ?: -ETIME, 0);
89b263f6 7618 io_put_req(prev);
89b263f6
JA
7619 } else {
7620 io_req_complete_post(req, -ETIME, 0);
7621 }
7622}
7623
2665abfd 7624static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2b188cc1 7625{
ad8a48ac
JA
7626 struct io_timeout_data *data = container_of(timer,
7627 struct io_timeout_data, timer);
90cd7e42 7628 struct io_kiocb *prev, *req = data->req;
2665abfd 7629 struct io_ring_ctx *ctx = req->ctx;
2665abfd 7630 unsigned long flags;
2665abfd 7631
89b263f6 7632 spin_lock_irqsave(&ctx->timeout_lock, flags);
90cd7e42
PB
7633 prev = req->timeout.head;
7634 req->timeout.head = NULL;
2665abfd
JA
7635
7636 /*
7637 * We don't expect the list to be empty; that will only happen if we
7638 * race with the completion of the linked work.
7639 */
447c19f3 7640 if (prev) {
f2f87370 7641 io_remove_next_linked(prev);
447c19f3
PB
7642 if (!req_ref_inc_not_zero(prev))
7643 prev = NULL;
7644 }
ef9dd637 7645 list_del(&req->timeout.list);
89b263f6
JA
7646 req->timeout.prev = prev;
7647 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
2665abfd 7648
89b263f6 7649 req->io_task_work.func = io_req_task_link_timeout;
4813c377 7650 io_req_task_work_add(req, false);
2665abfd
JA
7651 return HRTIMER_NORESTART;
7652}
7653
de968c18 7654static void io_queue_linked_timeout(struct io_kiocb *req)
2665abfd 7655{
de968c18
PB
7656 struct io_ring_ctx *ctx = req->ctx;
7657
89b263f6 7658 spin_lock_irq(&ctx->timeout_lock);
76a46e06 7659 /*
f2f87370
PB
7660 * If the back reference is NULL, then our linked request finished
7661 * before we got a chance to set up the timer
76a46e06 7662 */
90cd7e42 7663 if (req->timeout.head) {
e8c2bc1f 7664 struct io_timeout_data *data = req->async_data;
94ae5e77 7665
ad8a48ac
JA
7666 data->timer.function = io_link_timeout_fn;
7667 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
7668 data->mode);
ef9dd637 7669 list_add_tail(&req->timeout.list, &ctx->ltimeout_list);
2665abfd 7670 }
89b263f6 7671 spin_unlock_irq(&ctx->timeout_lock);
2665abfd 7672 /* drop submission reference */
76a46e06
JA
7673 io_put_req(req);
7674}
2665abfd 7675
7bfa9bad 7676static void io_queue_async(struct io_kiocb *req, int ret)
d475a9a6
PB
7677 __must_hold(&req->ctx->uring_lock)
7678{
7bfa9bad
PB
7679 struct io_kiocb *linked_timeout;
7680
7681 if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
7682 io_req_complete_failed(req, ret);
7683 return;
7684 }
7685
7686 linked_timeout = io_prep_linked_timeout(req);
d475a9a6 7687
4d9237e3 7688 switch (io_arm_poll_handler(req, 0)) {
d475a9a6 7689 case IO_APOLL_READY:
d475a9a6
PB
7690 io_req_task_queue(req);
7691 break;
7692 case IO_APOLL_ABORTED:
7693 /*
7694 * Queued up for async execution, worker will release
7695 * submit reference when the iocb is actually submitted.
7696 */
77955efb 7697 io_queue_iowq(req, NULL);
d475a9a6 7698 break;
b1c62645 7699 case IO_APOLL_OK:
b1c62645 7700 break;
d475a9a6
PB
7701 }
7702
7703 if (linked_timeout)
7704 io_queue_linked_timeout(linked_timeout);
7705}
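
/*
 * io_arm_poll_handler() return values as consumed above: IO_APOLL_OK
 * means poll was armed and the request will be re-issued from the poll
 * wakeup; IO_APOLL_READY means the file is already ready, so the
 * request is queued for task_work re-issue; IO_APOLL_ABORTED means
 * polling could not be used and the request is punted to io-wq.
 */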
7706
cbc2e203 7707static inline void io_queue_sqe(struct io_kiocb *req)
282cdc86 7708 __must_hold(&req->ctx->uring_lock)
2b188cc1 7709{
e0c5c576 7710 int ret;
2b188cc1 7711
c5eef2b9 7712 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
193155c8 7713
fff4e40e
PB
7714 if (req->flags & REQ_F_COMPLETE_INLINE) {
7715 io_req_add_compl_list(req);
d9f9d284 7716 return;
fff4e40e 7717 }
491381ce
JA
7718 /*
7719 * We async punt it if the file wasn't marked NOWAIT, or if the file
7720 * doesn't support non-blocking read/write attempts
7721 */
7bfa9bad 7722 if (likely(!ret))
cb2d344c 7723 io_arm_ltimeout(req);
7bfa9bad
PB
7724 else
7725 io_queue_async(req, ret);
2b188cc1
JA
7726}
7727
4652fe3f 7728static void io_queue_sqe_fallback(struct io_kiocb *req)
282cdc86 7729 __must_hold(&req->ctx->uring_lock)
4fe2c963 7730{
17b147f6
PB
7731 if (unlikely(req->flags & REQ_F_FAIL)) {
7732 /*
7733 * We don't submit; fail them all. To do that, replace hardlinks
7734 * with normal links. An extra REQ_F_LINK is tolerated.
7735 */
7736 req->flags &= ~REQ_F_HARDLINK;
7737 req->flags |= REQ_F_LINK;
7738 io_req_complete_failed(req, req->cqe.res);
e0eb71dc
PB
7739 } else if (unlikely(req->ctx->drain_active)) {
7740 io_drain_req(req);
76cc33d7
PB
7741 } else {
7742 int ret = io_req_prep_async(req);
7743
7744 if (unlikely(ret))
7745 io_req_complete_failed(req, ret);
7746 else
77955efb 7747 io_queue_iowq(req, NULL);
ce35a47a 7748 }
4fe2c963
JL
7749}
7750
b16fed66
PB
7751/*
7752 * Check SQE restrictions (opcode and flags).
7753 *
7754 * Returns 'true' if SQE is allowed, 'false' otherwise.
7755 */
7756static inline bool io_check_restriction(struct io_ring_ctx *ctx,
7757 struct io_kiocb *req,
7758 unsigned int sqe_flags)
4fe2c963 7759{
b16fed66
PB
7760 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
7761 return false;
7762
7763 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
7764 ctx->restrictions.sqe_flags_required)
7765 return false;
7766
7767 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
7768 ctx->restrictions.sqe_flags_required))
7769 return false;
7770
7771 return true;
4fe2c963
JL
7772}
7773
22b2ca31
PB
7774static void io_init_req_drain(struct io_kiocb *req)
7775{
7776 struct io_ring_ctx *ctx = req->ctx;
7777 struct io_kiocb *head = ctx->submit_state.link.head;
7778
7779 ctx->drain_active = true;
7780 if (head) {
7781 /*
7782 * If we need to drain a request in the middle of a link, drain
7783 * the head request and the next request/link after the current
7784 * link. Considering sequential execution of links,
b6c7db32 7785 * REQ_F_IO_DRAIN will be maintained for every request of our
22b2ca31
PB
7786 * link.
7787 */
b6c7db32 7788 head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
22b2ca31
PB
7789 ctx->drain_next = true;
7790 }
7791}
7792
b16fed66
PB
7793static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
7794 const struct io_uring_sqe *sqe)
282cdc86 7795 __must_hold(&ctx->uring_lock)
b16fed66 7796{
b16fed66 7797 unsigned int sqe_flags;
fc0ae024 7798 int personality;
4a04d1d1 7799 u8 opcode;
b16fed66 7800
864ea921 7801 /* req is partially pre-initialised, see io_preinit_req() */
4a04d1d1 7802 req->opcode = opcode = READ_ONCE(sqe->opcode);
b16fed66
PB
7803 /* same numerical values with corresponding REQ_F_*, safe to copy */
7804 req->flags = sqe_flags = READ_ONCE(sqe->flags);
cef216fc 7805 req->cqe.user_data = READ_ONCE(sqe->user_data);
b16fed66 7806 req->file = NULL;
c1bdf8ed 7807 req->rsrc_node = NULL;
b16fed66 7808 req->task = current;
b16fed66 7809
4a04d1d1
PB
7810 if (unlikely(opcode >= IORING_OP_LAST)) {
7811 req->opcode = 0;
b16fed66 7812 return -EINVAL;
4a04d1d1 7813 }
68fe256a
PB
7814 if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
7815 /* enforce forwards compatibility on users */
7816 if (sqe_flags & ~SQE_VALID_FLAGS)
7817 return -EINVAL;
7818 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
4a04d1d1 7819 !io_op_defs[opcode].buffer_select)
68fe256a 7820 return -EOPNOTSUPP;
5562a8d7
PB
7821 if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
7822 ctx->drain_disabled = true;
7823 if (sqe_flags & IOSQE_IO_DRAIN) {
7824 if (ctx->drain_disabled)
7825 return -EOPNOTSUPP;
22b2ca31 7826 io_init_req_drain(req);
5562a8d7 7827 }
2a56a9bd
PB
7828 }
7829 if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
7830 if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
7831 return -EACCES;
7832 /* knock it to the slow queue path, will be drained there */
7833 if (ctx->drain_active)
7834 req->flags |= REQ_F_FORCE_ASYNC;
7835 /* if there is no link, we're at "next" request and need to drain */
7836 if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
7837 ctx->drain_next = false;
7838 ctx->drain_active = true;
b6c7db32 7839 req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2a56a9bd 7840 }
68fe256a 7841 }
b16fed66 7842
4a04d1d1 7843 if (io_op_defs[opcode].needs_file) {
6d63416d
PB
7844 struct io_submit_state *state = &ctx->submit_state;
7845
cef216fc 7846 req->cqe.fd = READ_ONCE(sqe->fd);
6bf9c47a 7847
6d63416d
PB
7848 /*
7849 * Plug now if we have more than 2 IO left after this, and the
7850 * target is potentially a read/write to block based storage.
7851 */
4a04d1d1 7852 if (state->need_plug && io_op_defs[opcode].plug) {
6d63416d
PB
7853 state->plug_started = true;
7854 state->need_plug = false;
5ca7a8b3 7855 blk_start_plug_nr_ios(&state->plug, state->submit_nr);
6d63416d 7856 }
b16fed66 7857 }
863e0560 7858
003e8dcc
JA
7859 personality = READ_ONCE(sqe->personality);
7860 if (personality) {
cdab10bf
LT
7861 int ret;
7862
c10d1f98
PB
7863 req->creds = xa_load(&ctx->personalities, personality);
7864 if (!req->creds)
003e8dcc 7865 return -EINVAL;
c10d1f98 7866 get_cred(req->creds);
cdc1404a
PM
7867 ret = security_uring_override_creds(req->creds);
7868 if (ret) {
7869 put_cred(req->creds);
7870 return ret;
7871 }
b8e64b53 7872 req->flags |= REQ_F_CREDS;
003e8dcc 7873 }
b16fed66 7874
fc0ae024 7875 return io_req_prep(req, sqe);
b16fed66
PB
7876}
7877
df3becde
PB
7878static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
7879 struct io_kiocb *req, int ret)
7880{
7881 struct io_ring_ctx *ctx = req->ctx;
7882 struct io_submit_link *link = &ctx->submit_state.link;
7883 struct io_kiocb *head = link->head;
7884
7885 trace_io_uring_req_failed(sqe, ctx, req, ret);
7886
7887 /*
7888 * Avoid breaking links in the middle as it renders links with SQPOLL
7889 * unusable. Instead of failing eagerly, continue assembling the link if
7890 * applicable and mark the head with REQ_F_FAIL. The link flushing code
7891 * should find the flag and handle the rest.
7892 */
7893 req_fail_link_node(req, ret);
7894 if (head && !(head->flags & REQ_F_FAIL))
7895 req_fail_link_node(head, -ECANCELED);
7896
7897 if (!(req->flags & IO_REQ_LINK_FLAGS)) {
7898 if (head) {
7899 link->last->link = req;
7900 link->head = NULL;
7901 req = head;
7902 }
7903 io_queue_sqe_fallback(req);
7904 return ret;
7905 }
7906
7907 if (head)
7908 link->last->link = req;
7909 else
7910 link->head = req;
7911 link->last = req;
7912 return 0;
7913}
7914
7915static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
a1ab7b35 7916 const struct io_uring_sqe *sqe)
282cdc86 7917 __must_hold(&ctx->uring_lock)
9e645e11 7918{
a1ab7b35 7919 struct io_submit_link *link = &ctx->submit_state.link;
ef4ff581 7920 int ret;
9e645e11 7921
a6b8cadc 7922 ret = io_init_req(ctx, req, sqe);
df3becde
PB
7923 if (unlikely(ret))
7924 return io_submit_fail_init(sqe, req, ret);
441b8a78 7925
be7053b7 7926 /* don't need @sqe from now on */
cef216fc 7927 trace_io_uring_submit_sqe(ctx, req, req->cqe.user_data, req->opcode,
236daeae
OL
7928 req->flags, true,
7929 ctx->flags & IORING_SETUP_SQPOLL);
a6b8cadc 7930
9e645e11
JA
7931 /*
7932 * If we already have a head request, queue this one for async
7933 * submittal once the head completes. If we don't have a head but
7934 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
7935 * submitted sync once the chain is complete. If none of those
7936 * conditions are true (normal request), then just queue it.
7937 */
924a07e4 7938 if (unlikely(link->head)) {
df3becde
PB
7939 ret = io_req_prep_async(req);
7940 if (unlikely(ret))
7941 return io_submit_fail_init(sqe, req, ret);
7942
7943 trace_io_uring_link(ctx, req, link->head);
f2f87370 7944 link->last->link = req;
863e0560 7945 link->last = req;
32fe525b 7946
da1a08c5 7947 if (req->flags & IO_REQ_LINK_FLAGS)
f15a3431 7948 return 0;
df3becde
PB
7949 /* last request of the link, flush it */
7950 req = link->head;
f15a3431 7951 link->head = NULL;
924a07e4
PB
7952 if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
7953 goto fallback;
7954
7955 } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
7956 REQ_F_FORCE_ASYNC | REQ_F_FAIL))) {
7957 if (req->flags & IO_REQ_LINK_FLAGS) {
7958 link->head = req;
7959 link->last = req;
7960 } else {
7961fallback:
7962 io_queue_sqe_fallback(req);
7963 }
f15a3431 7964 return 0;
9e645e11 7965 }
2e6e1fde 7966
924a07e4 7967 io_queue_sqe(req);
1d4240cc 7968 return 0;
9e645e11
JA
7969}
7970
9a56a232
JA
7971/*
7972 * Batched submission is done, ensure local IO is flushed out.
7973 */
553deffd 7974static void io_submit_state_end(struct io_ring_ctx *ctx)
9a56a232 7975{
553deffd
PB
7976 struct io_submit_state *state = &ctx->submit_state;
7977
e126391c
PB
7978 if (unlikely(state->link.head))
7979 io_queue_sqe_fallback(state->link.head);
553deffd 7980 /* flush only after queuing links as they can generate completions */
c450178d 7981 io_submit_flush_completions(ctx);
27926b68
JA
7982 if (state->plug_started)
7983 blk_finish_plug(&state->plug);
9a56a232
JA
7984}
7985
7986/*
7987 * Start submission side cache.
7988 */
7989static void io_submit_state_start(struct io_submit_state *state,
ba88ff11 7990 unsigned int max_ios)
9a56a232 7991{
27926b68 7992 state->plug_started = false;
4b628aeb 7993 state->need_plug = max_ios > 2;
5ca7a8b3 7994 state->submit_nr = max_ios;
a1ab7b35
PB
7995 /* set only head, no need to init link_last in advance */
7996 state->link.head = NULL;
9a56a232
JA
7997}
7998
2b188cc1
JA
7999static void io_commit_sqring(struct io_ring_ctx *ctx)
8000{
75b28aff 8001 struct io_rings *rings = ctx->rings;
2b188cc1 8002
caf582c6
PB
8003 /*
8004 * Ensure any loads from the SQEs are done at this point,
8005 * since once we write the new head, the application could
8006 * write new data to them.
8007 */
8008 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
8009}
8010
2b188cc1 8011/*
dd9ae8a0 8012 * Fetch an sqe, if one is available. Note this returns a pointer to memory
2b188cc1
JA
8013 * that is mapped by userspace. This means that care needs to be taken to
8014 * ensure that reads are stable, as we cannot rely on userspace always
8015 * being a good citizen. If members of the sqe are validated and then later
8016 * used, it's important that those reads are done through READ_ONCE() to
8017 * prevent a re-load down the line.
8018 */
709b302f 8019static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2b188cc1 8020{
ea5ab3b5 8021 unsigned head, mask = ctx->sq_entries - 1;
17d3aeb3 8022 unsigned sq_idx = ctx->cached_sq_head++ & mask;
2b188cc1
JA
8023
8024 /*
8025 * The cached sq head (or cq tail) serves two purposes:
8026 *
8027 * 1) allows us to batch the cost of updating the user visible
8028 * head.
8029 * 2) allows the kernel side to track the head on its own, even
8030 * though the application is the one updating it.
8031 */
17d3aeb3 8032 head = READ_ONCE(ctx->sq_array[sq_idx]);
709b302f
PB
8033 if (likely(head < ctx->sq_entries))
8034 return &ctx->sq_sqes[head];
2b188cc1
JA
8035
8036 /* drop invalid entries */
15641e42
PB
8037 ctx->cq_extra--;
8038 WRITE_ONCE(ctx->rings->sq_dropped,
8039 READ_ONCE(ctx->rings->sq_dropped) + 1);
709b302f
PB
8040 return NULL;
8041}
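
/*
 * The application side mirrors this lookup: before advancing the SQ
 * tail, it writes the index of the SQE it just filled into the shared
 * index array at slot (tail & ring_mask), which is the same array the
 * kernel reads here through ctx->sq_array. An out-of-range index is
 * simply dropped and accounted in sq_dropped, as above.
 */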
8042
0f212204 8043static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
282cdc86 8044 __must_hold(&ctx->uring_lock)
6c271ce2 8045{
69629809 8046 unsigned int entries = io_sqring_entries(ctx);
8e6971a8
PB
8047 unsigned int left;
8048 int ret;
6c271ce2 8049
51d48dab 8050 if (unlikely(!entries))
69629809 8051 return 0;
ee7d46d9 8052 /* make sure SQ entry isn't read before tail */
8e6971a8
PB
8053 ret = left = min3(nr, ctx->sq_entries, entries);
8054 io_get_task_refs(left);
8055 io_submit_state_start(&ctx->submit_state, left);
6c271ce2 8056
69629809 8057 do {
3529d8c2 8058 const struct io_uring_sqe *sqe;
196be95c 8059 struct io_kiocb *req;
fb5ccc98 8060
8e6971a8 8061 if (unlikely(!io_alloc_req_refill(ctx)))
fb5ccc98 8062 break;
a33ae9ce 8063 req = io_alloc_req(ctx);
4fccfcbb
PB
8064 sqe = io_get_sqe(ctx);
8065 if (unlikely(!sqe)) {
fa05457a 8066 io_req_add_to_cache(req, ctx);
4fccfcbb
PB
8067 break;
8068 }
1cd15904
PB
8069
8070 /*
8071 * Continue submitting even for sqe failure if the
8072 * ring was setup with IORING_SETUP_SUBMIT_ALL
8073 */
8074 if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
8075 !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
8076 left--;
8077 break;
bcbb7bf6 8078 }
1cd15904 8079 } while (--left);
9466f437 8080
8e6971a8
PB
8081 if (unlikely(left)) {
8082 ret -= left;
8083 /* try again if it submitted nothing and can't allocate a req */
8084 if (!ret && io_req_cache_empty(ctx))
8085 ret = -EAGAIN;
8086 current->io_uring->cached_refs += left;
9466f437 8087 }
6c271ce2 8088
553deffd 8089 io_submit_state_end(ctx);
ae9428ca
PB
8090 /* Commit SQ ring head once we've consumed and submitted all SQEs */
8091 io_commit_sqring(ctx);
8e6971a8 8092 return ret;
6c271ce2
JA
8093}
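
/*
 * Note the reference accounting in io_submit_sqes(): io_get_task_refs()
 * charges the task for every SQE we intend to submit up front, and any
 * that were not actually consumed (request allocation failed, the SQ
 * entry was invalid, or submission stopped early) are handed back via
 * current->io_uring->cached_refs before returning.
 */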
8094
e4b6d902
PB
8095static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
8096{
8097 return READ_ONCE(sqd->state);
8098}
8099
08369246 8100static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
6c271ce2 8101{
c8d1ba58 8102 unsigned int to_submit;
bdcd3eab 8103 int ret = 0;
6c271ce2 8104
c8d1ba58 8105 to_submit = io_sqring_entries(ctx);
e95eee2d 8106 /* if we're handling multiple rings, cap submit size for fairness */
4ce8ad95
OL
8107 if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
8108 to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
e95eee2d 8109
5eef4e87 8110 if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
948e1947
PB
8111 const struct cred *creds = NULL;
8112
8113 if (ctx->sq_creds != current_cred())
8114 creds = override_creds(ctx->sq_creds);
a4c0b3de 8115
c8d1ba58 8116 mutex_lock(&ctx->uring_lock);
5eef4e87 8117 if (!wq_list_empty(&ctx->iopoll_list))
5ba3c874 8118 io_do_iopoll(ctx, true);
906a3c6f 8119
3b763ba1
PB
8120 /*
8121 * Don't submit if refs are dying, good for io_uring_register(),
8122 * but it is also relied upon by io_ring_exit_work()
8123 */
0298ef96
PB
8124 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
8125 !(ctx->flags & IORING_SETUP_R_DISABLED))
08369246 8126 ret = io_submit_sqes(ctx, to_submit);
c8d1ba58 8127 mutex_unlock(&ctx->uring_lock);
cb318216 8128
acfb381d
PB
8129 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
8130 wake_up(&ctx->sqo_sq_wait);
948e1947
PB
8131 if (creds)
8132 revert_creds(creds);
acfb381d 8133 }
6c271ce2 8134
08369246
XW
8135 return ret;
8136}
6c271ce2 8137
c072481d 8138static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
08369246
XW
8139{
8140 struct io_ring_ctx *ctx;
8141 unsigned sq_thread_idle = 0;
6c271ce2 8142
c9dca27d
PB
8143 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
8144 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
08369246 8145 sqd->sq_thread_idle = sq_thread_idle;
c8d1ba58 8146}
6c271ce2 8147
e4b6d902
PB
8148static bool io_sqd_handle_event(struct io_sq_data *sqd)
8149{
8150 bool did_sig = false;
8151 struct ksignal ksig;
8152
8153 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
8154 signal_pending(current)) {
8155 mutex_unlock(&sqd->lock);
8156 if (signal_pending(current))
8157 did_sig = get_signal(&ksig);
8158 cond_resched();
8159 mutex_lock(&sqd->lock);
8160 }
e4b6d902
PB
8161 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
8162}
8163
c8d1ba58
JA
8164static int io_sq_thread(void *data)
8165{
69fb2131
JA
8166 struct io_sq_data *sqd = data;
8167 struct io_ring_ctx *ctx;
a0d9205f 8168 unsigned long timeout = 0;
37d1e2e3 8169 char buf[TASK_COMM_LEN];
08369246 8170 DEFINE_WAIT(wait);
6c271ce2 8171
696ee88a 8172 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
37d1e2e3 8173 set_task_comm(current, buf);
37d1e2e3
JA
8174
8175 if (sqd->sq_cpu != -1)
8176 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
8177 else
8178 set_cpus_allowed_ptr(current, cpu_online_mask);
8179 current->flags |= PF_NO_SETAFFINITY;
8180
5bd2182d
PM
8181 audit_alloc_kernel(current);
8182
09a6f4ef 8183 mutex_lock(&sqd->lock);
e4b6d902 8184 while (1) {
1a924a80 8185 bool cap_entries, sqt_spin = false;
c1edbf5f 8186
e4b6d902
PB
8187 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
8188 if (io_sqd_handle_event(sqd))
c7d95613 8189 break;
08369246
XW
8190 timeout = jiffies + sqd->sq_thread_idle;
8191 }
e4b6d902 8192
e95eee2d 8193 cap_entries = !list_is_singular(&sqd->ctx_list);
69fb2131 8194 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
948e1947 8195 int ret = __io_sq_thread(ctx, cap_entries);
7c30f36a 8196
5eef4e87 8197 if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
08369246 8198 sqt_spin = true;
69fb2131 8199 }
dd432ea5
PB
8200 if (io_run_task_work())
8201 sqt_spin = true;
6c271ce2 8202
08369246 8203 if (sqt_spin || !time_after(jiffies, timeout)) {
c8d1ba58 8204 cond_resched();
08369246
XW
8205 if (sqt_spin)
8206 timeout = jiffies + sqd->sq_thread_idle;
8207 continue;
8208 }
8209
08369246 8210 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
7f62d40d 8211 if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
1a924a80
PB
8212 bool needs_sched = true;
8213
724cb4f9 8214 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
3a4b89a2
JA
8215 atomic_or(IORING_SQ_NEED_WAKEUP,
8216 &ctx->rings->sq_flags);
724cb4f9 8217 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
5eef4e87 8218 !wq_list_empty(&ctx->iopoll_list)) {
724cb4f9
HX
8219 needs_sched = false;
8220 break;
8221 }
649bb75d
AK
8222
8223 /*
8224 * Ensure the store of the wakeup flag is not
8225 * reordered with the load of the SQ tail
8226 */
8227 smp_mb();
8228
724cb4f9
HX
8229 if (io_sqring_entries(ctx)) {
8230 needs_sched = false;
8231 break;
8232 }
8233 }
8234
8235 if (needs_sched) {
8236 mutex_unlock(&sqd->lock);
8237 schedule();
8238 mutex_lock(&sqd->lock);
8239 }
69fb2131 8240 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
3a4b89a2
JA
8241 atomic_andnot(IORING_SQ_NEED_WAKEUP,
8242 &ctx->rings->sq_flags);
6c271ce2 8243 }
08369246
XW
8244
8245 finish_wait(&sqd->wait, &wait);
8246 timeout = jiffies + sqd->sq_thread_idle;
6c271ce2 8247 }
28cea78a 8248
78cc687b 8249 io_uring_cancel_generic(true, sqd);
37d1e2e3 8250 sqd->thread = NULL;
05962f95 8251 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
3a4b89a2 8252 atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
521d6a73 8253 io_run_task_work();
734551df
PB
8254 mutex_unlock(&sqd->lock);
8255
5bd2182d
PM
8256 audit_free(current);
8257
37d1e2e3
JA
8258 complete(&sqd->exited);
8259 do_exit(0);
6c271ce2
JA
8260}
8261
bda52162
JA
8262struct io_wait_queue {
8263 struct wait_queue_entry wq;
8264 struct io_ring_ctx *ctx;
5fd46178 8265 unsigned cq_tail;
bda52162
JA
8266 unsigned nr_timeouts;
8267};
8268
6c503150 8269static inline bool io_should_wake(struct io_wait_queue *iowq)
bda52162
JA
8270{
8271 struct io_ring_ctx *ctx = iowq->ctx;
5fd46178 8272 int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;
bda52162
JA
8273
8274 /*
d195a66e 8275 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
8276 * started waiting. For timeouts, we always want to return to userspace,
8277 * regardless of event count.
8278 */
5fd46178 8279 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
bda52162
JA
8280}
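
/*
 * iowq->cq_tail is the CQ tail value being waited for: the CQ head at
 * the time the wait started plus min_events (see io_cqring_wait()).
 * Computing the signed difference against the current cached tail keeps
 * the check correct across u32 wraparound of the ring counters.
 */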
8281
8282static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
8283 int wake_flags, void *key)
8284{
8285 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
8286 wq);
8287
6c503150
PB
8288 /*
8289 * Cannot safely flush overflowed CQEs from here; ensure we wake up
8290 * the task, and the next invocation will do it.
8291 */
10988a0a
DY
8292 if (io_should_wake(iowq) ||
8293 test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &iowq->ctx->check_cq))
6c503150
PB
8294 return autoremove_wake_function(curr, mode, wake_flags, key);
8295 return -1;
bda52162
JA
8296}
8297
af9c1a44
JA
8298static int io_run_task_work_sig(void)
8299{
8300 if (io_run_task_work())
8301 return 1;
0b8cfa97 8302 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
792ee0f6 8303 return -ERESTARTSYS;
c5020bc8
OL
8304 if (task_sigpending(current))
8305 return -EINTR;
8306 return 0;
af9c1a44
JA
8307}
8308
eeb60b9a
PB
8309/* when returns >0, the caller should retry */
8310static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
8311 struct io_wait_queue *iowq,
22833966 8312 ktime_t timeout)
eeb60b9a
PB
8313{
8314 int ret;
155bc950 8315 unsigned long check_cq;
eeb60b9a
PB
8316
8317 /* make sure we run task_work before checking for signals */
8318 ret = io_run_task_work_sig();
8319 if (ret || io_should_wake(iowq))
8320 return ret;
155bc950 8321 check_cq = READ_ONCE(ctx->check_cq);
eeb60b9a 8322 /* let the caller flush overflows, retry */
155bc950 8323 if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
eeb60b9a 8324 return 1;
155bc950
DY
8325 if (unlikely(check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)))
8326 return -EBADR;
22833966
JA
8327 if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
8328 return -ETIME;
8329 return 1;
eeb60b9a
PB
8330}
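
/*
 * Return contract for io_cqring_wait_schedule(): > 0 means the caller
 * should re-check and possibly wait again (task_work ran, an overflow
 * flush is needed, or we simply woke up), 0 means the wake condition was
 * met, and < 0 is a terminal error (-EINTR or -ERESTARTSYS on signals,
 * -ETIME on timeout, -EBADR if CQEs were dropped).
 */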
8331
2b188cc1
JA
8332/*
8333 * Wait until events become available, if we don't already have some. The
8334 * application must reap them itself, as they reside on the shared cq ring.
8335 */
8336static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
c73ebb68
HX
8337 const sigset_t __user *sig, size_t sigsz,
8338 struct __kernel_timespec __user *uts)
2b188cc1 8339{
90291099 8340 struct io_wait_queue iowq;
75b28aff 8341 struct io_rings *rings = ctx->rings;
22833966 8342 ktime_t timeout = KTIME_MAX;
c1d5a224 8343 int ret;
2b188cc1 8344
b41e9852 8345 do {
90f67366 8346 io_cqring_overflow_flush(ctx);
6c503150 8347 if (io_cqring_events(ctx) >= min_events)
b41e9852 8348 return 0;
4c6e277c 8349 if (!io_run_task_work())
b41e9852 8350 break;
b41e9852 8351 } while (1);
2b188cc1
JA
8352
8353 if (sig) {
9e75ad5d
AB
8354#ifdef CONFIG_COMPAT
8355 if (in_compat_syscall())
8356 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 8357 sigsz);
9e75ad5d
AB
8358 else
8359#endif
b772434b 8360 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 8361
2b188cc1
JA
8362 if (ret)
8363 return ret;
8364 }
8365
950e79dd
OL
8366 if (uts) {
8367 struct timespec64 ts;
8368
8369 if (get_timespec64(&ts, uts))
8370 return -EFAULT;
8371 timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
8372 }
8373
90291099
PB
8374 init_waitqueue_func_entry(&iowq.wq, io_wake_function);
8375 iowq.wq.private = current;
8376 INIT_LIST_HEAD(&iowq.wq.entry);
8377 iowq.ctx = ctx;
bda52162 8378 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
5fd46178 8379 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
90291099 8380
c826bd7a 8381 trace_io_uring_cqring_wait(ctx, min_events);
bda52162 8382 do {
ca0a2651 8383 /* if we can't even flush overflow, don't wait for more */
90f67366 8384 if (!io_cqring_overflow_flush(ctx)) {
ca0a2651
JA
8385 ret = -EBUSY;
8386 break;
8387 }
311997b3 8388 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
bda52162 8389 TASK_INTERRUPTIBLE);
22833966 8390 ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
ca0a2651 8391 cond_resched();
eeb60b9a 8392 } while (ret > 0);
bda52162 8393
b4f20bb4 8394 finish_wait(&ctx->cq_wait, &iowq.wq);
b7db41c9 8395 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 8396
75b28aff 8397 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
8398}
8399
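/*
 * Userspace counterpart of the wait path above, as a minimal sketch using
 * liburing (io_uring_wait_cqes() roughly maps onto a single enter with a
 * minimum event count, an optional timeout and an optional signal mask).
 * Not part of this file; error handling trimmed for brevity.
 */
#include <liburing.h>

static int wait_for_completions(struct io_uring *ring, unsigned min_events)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_cqe *cqe;
	int ret;

	/* 0 on success, -ETIME on timeout, -EINTR if interrupted */
	ret = io_uring_wait_cqes(ring, &cqe, min_events, &ts, NULL);
	if (ret < 0)
		return ret;

	io_uring_cqe_seen(ring, cqe);	/* mark the first CQE as consumed */
	return 0;
}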
9123c8ff 8400static void io_free_page_table(void **table, size_t size)
05f3fb3c 8401{
9123c8ff 8402 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
05f3fb3c 8403
846a4ef2 8404 for (i = 0; i < nr_tables; i++)
9123c8ff
PB
8405 kfree(table[i]);
8406 kfree(table);
8407}
8408
c072481d 8409static __cold void **io_alloc_page_table(size_t size)
9123c8ff
PB
8410{
8411 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
8412 size_t init_size = size;
8413 void **table;
8414
0bea96f5 8415 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
9123c8ff
PB
8416 if (!table)
8417 return NULL;
8418
8419 for (i = 0; i < nr_tables; i++) {
27f6b318 8420 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
9123c8ff 8421
0bea96f5 8422 table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
9123c8ff
PB
8423 if (!table[i]) {
8424 io_free_page_table(table, init_size);
8425 return NULL;
8426 }
8427 size -= this_size;
8428 }
8429 return table;
05f3fb3c
JA
8430}
8431
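/*
 * The two helpers above split one large table into page-sized chunks so no
 * single allocation has to be physically large. A minimal userspace sketch
 * of the same layout (CHUNK_SIZE and the function name are illustrative,
 * not kernel APIs):
 */
#include <stdlib.h>

#define CHUNK_SIZE 4096u

static void **chunked_alloc(size_t size)
{
	size_t i, nr_chunks = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
	void **table = calloc(nr_chunks, sizeof(*table));

	if (!table)
		return NULL;
	for (i = 0; i < nr_chunks; i++) {
		size_t this_size = size < CHUNK_SIZE ? size : CHUNK_SIZE;

		table[i] = calloc(1, this_size);
		if (!table[i]) {
			while (i--)
				free(table[i]);
			free(table);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}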
28a9fe25 8432static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
1642b445 8433{
28a9fe25
PB
8434 percpu_ref_exit(&ref_node->refs);
8435 kfree(ref_node);
1642b445
PB
8436}
8437
c072481d 8438static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref)
b9bd2bea
PB
8439{
8440 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
8441 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
8442 unsigned long flags;
8443 bool first_add = false;
b36a2050 8444 unsigned long delay = HZ;
b9bd2bea
PB
8445
8446 spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
8447 node->done = true;
8448
b36a2050
DY
8449 /* if we are mid-quiesce then do not delay */
8450 if (node->rsrc_data->quiesce)
8451 delay = 0;
8452
b9bd2bea
PB
8453 while (!list_empty(&ctx->rsrc_ref_list)) {
8454 node = list_first_entry(&ctx->rsrc_ref_list,
8455 struct io_rsrc_node, node);
8456 /* recycle ref nodes in order */
8457 if (!node->done)
8458 break;
8459 list_del(&node->node);
8460 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
8461 }
8462 spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
8463
8464 if (first_add)
b36a2050 8465 mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
b9bd2bea
PB
8466}
8467
f6133fbd 8468static struct io_rsrc_node *io_rsrc_node_alloc(void)
b9bd2bea
PB
8469{
8470 struct io_rsrc_node *ref_node;
8471
8472 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
8473 if (!ref_node)
8474 return NULL;
8475
8476 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
8477 0, GFP_KERNEL)) {
8478 kfree(ref_node);
8479 return NULL;
8480 }
8481 INIT_LIST_HEAD(&ref_node->node);
8482 INIT_LIST_HEAD(&ref_node->rsrc_list);
8483 ref_node->done = false;
8484 return ref_node;
8485}
8486
a7f0ed5a
PB
8487static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
8488 struct io_rsrc_data *data_to_kill)
ab409402 8489 __must_hold(&ctx->uring_lock)
6b06314c 8490{
a7f0ed5a
PB
8491 WARN_ON_ONCE(!ctx->rsrc_backup_node);
8492 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
6b06314c 8493
ab409402
PB
8494 io_rsrc_refs_drop(ctx);
8495
a7f0ed5a
PB
8496 if (data_to_kill) {
8497 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
82fbcfa9 8498
a7f0ed5a 8499 rsrc_node->rsrc_data = data_to_kill;
4956b9ea 8500 spin_lock_irq(&ctx->rsrc_ref_lock);
a7f0ed5a 8501 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
4956b9ea 8502 spin_unlock_irq(&ctx->rsrc_ref_lock);
82fbcfa9 8503
3e942498 8504 atomic_inc(&data_to_kill->refs);
a7f0ed5a
PB
8505 percpu_ref_kill(&rsrc_node->refs);
8506 ctx->rsrc_node = NULL;
8507 }
6b06314c 8508
a7f0ed5a
PB
8509 if (!ctx->rsrc_node) {
8510 ctx->rsrc_node = ctx->rsrc_backup_node;
8511 ctx->rsrc_backup_node = NULL;
8512 }
8bad28d8
HX
8513}
8514
a7f0ed5a 8515static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
8dd03afe
PB
8516{
8517 if (ctx->rsrc_backup_node)
8518 return 0;
f6133fbd 8519 ctx->rsrc_backup_node = io_rsrc_node_alloc();
8dd03afe 8520 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
8bad28d8
HX
8521}
8522
c072481d
PB
8523static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
8524 struct io_ring_ctx *ctx)
8bad28d8
HX
8525{
8526 int ret;
05589553 8527
215c3902 8528 /* As we may drop ->uring_lock, another task may have started a quiesce */
8bad28d8
HX
8529 if (data->quiesce)
8530 return -ENXIO;
05589553 8531
8bad28d8 8532 data->quiesce = true;
1ffc5422 8533 do {
a7f0ed5a 8534 ret = io_rsrc_node_switch_start(ctx);
8dd03afe 8535 if (ret)
f2303b1f 8536 break;
a7f0ed5a 8537 io_rsrc_node_switch(ctx, data);
f2303b1f 8538
3e942498
PB
8539 /* kill initial ref, already quiesced if zero */
8540 if (atomic_dec_and_test(&data->refs))
8541 break;
c018db4a 8542 mutex_unlock(&ctx->uring_lock);
8bad28d8 8543 flush_delayed_work(&ctx->rsrc_put_work);
1ffc5422 8544 ret = wait_for_completion_interruptible(&data->done);
c018db4a
JA
8545 if (!ret) {
8546 mutex_lock(&ctx->uring_lock);
80912cef
DY
8547 if (atomic_read(&data->refs) > 0) {
8548 /*
8549 * it has been revived by another thread while
8550 * we were unlocked
8551 */
8552 mutex_unlock(&ctx->uring_lock);
8553 } else {
8554 break;
8555 }
c018db4a 8556 }
8bad28d8 8557
3e942498
PB
8558 atomic_inc(&data->refs);
8559 /* wait for all works potentially completing data->done */
8560 flush_delayed_work(&ctx->rsrc_put_work);
cb5e1b81 8561 reinit_completion(&data->done);
8dd03afe 8562
1ffc5422 8563 ret = io_run_task_work_sig();
8bad28d8 8564 mutex_lock(&ctx->uring_lock);
f2303b1f 8565 } while (ret >= 0);
8bad28d8 8566 data->quiesce = false;
05f3fb3c 8567
8bad28d8 8568 return ret;
d7954b2b
BM
8569}
8570
2d091d62
PB
8571static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
8572{
8573 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
8574 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
8575
8576 return &data->tags[table_idx][off];
8577}
8578
44b31f2f 8579static void io_rsrc_data_free(struct io_rsrc_data *data)
1ad555c6 8580{
2d091d62
PB
8581 size_t size = data->nr * sizeof(data->tags[0][0]);
8582
8583 if (data->tags)
8584 io_free_page_table((void **)data->tags, size);
44b31f2f
PB
8585 kfree(data);
8586}
8587
c072481d
PB
8588static __cold int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
8589 u64 __user *utags, unsigned nr,
8590 struct io_rsrc_data **pdata)
1ad555c6 8591{
b895c9a6 8592 struct io_rsrc_data *data;
2d091d62 8593 int ret = -ENOMEM;
d878c816 8594 unsigned i;
1ad555c6
BM
8595
8596 data = kzalloc(sizeof(*data), GFP_KERNEL);
8597 if (!data)
d878c816 8598 return -ENOMEM;
2d091d62 8599 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
b60c8dce 8600 if (!data->tags) {
1ad555c6 8601 kfree(data);
d878c816
PB
8602 return -ENOMEM;
8603 }
2d091d62
PB
8604
8605 data->nr = nr;
8606 data->ctx = ctx;
8607 data->do_put = do_put;
d878c816 8608 if (utags) {
2d091d62 8609 ret = -EFAULT;
d878c816 8610 for (i = 0; i < nr; i++) {
fdd1dc31
CIK
8611 u64 *tag_slot = io_get_tag_slot(data, i);
8612
8613 if (copy_from_user(tag_slot, &utags[i],
8614 sizeof(*tag_slot)))
2d091d62 8615 goto fail;
d878c816 8616 }
1ad555c6 8617 }
b60c8dce 8618
3e942498 8619 atomic_set(&data->refs, 1);
1ad555c6 8620 init_completion(&data->done);
d878c816
PB
8621 *pdata = data;
8622 return 0;
2d091d62
PB
8623fail:
8624 io_rsrc_data_free(data);
8625 return ret;
1ad555c6
BM
8626}
8627
9123c8ff
PB
8628static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
8629{
0bea96f5
PB
8630 table->files = kvcalloc(nr_files, sizeof(table->files[0]),
8631 GFP_KERNEL_ACCOUNT);
9123c8ff
PB
8632 return !!table->files;
8633}
8634
042b0d85 8635static void io_free_file_tables(struct io_file_table *table)
9123c8ff 8636{
042b0d85 8637 kvfree(table->files);
9123c8ff
PB
8638 table->files = NULL;
8639}
8640
fff4db76 8641static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
1ad555c6 8642{
69cc1b6f 8643#if !defined(IO_URING_SCM_ALL)
1f59bc0f
PB
8644 int i;
8645
8646 for (i = 0; i < ctx->nr_user_files; i++) {
8647 struct file *file = io_file_from_index(ctx, i);
8648
5e45690a
JA
8649 if (!file)
8650 continue;
8651 if (io_fixed_file_slot(&ctx->file_table, i)->file_ptr & FFS_SCM)
1f59bc0f 8652 continue;
1f59bc0f
PB
8653 fput(file);
8654 }
5e45690a 8655#endif
1f59bc0f 8656
fff4db76
PB
8657#if defined(CONFIG_UNIX)
8658 if (ctx->ring_sock) {
8659 struct sock *sock = ctx->ring_sock->sk;
8660 struct sk_buff *skb;
8661
8662 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
8663 kfree_skb(skb);
8664 }
fff4db76 8665#endif
042b0d85 8666 io_free_file_tables(&ctx->file_table);
44b31f2f 8667 io_rsrc_data_free(ctx->file_data);
fff4db76
PB
8668 ctx->file_data = NULL;
8669 ctx->nr_user_files = 0;
1ad555c6
BM
8670}
8671
d7954b2b
BM
8672static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
8673{
d7954b2b
BM
8674 int ret;
8675
08480400 8676 if (!ctx->file_data)
d7954b2b 8677 return -ENXIO;
08480400
PB
8678 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
8679 if (!ret)
8680 __io_sqe_files_unregister(ctx);
8681 return ret;
6b06314c
JA
8682}
8683
37d1e2e3 8684static void io_sq_thread_unpark(struct io_sq_data *sqd)
09a6f4ef 8685 __releases(&sqd->lock)
37d1e2e3 8686{
521d6a73
PB
8687 WARN_ON_ONCE(sqd->thread == current);
8688
9e138a48
PB
8689 /*
8690 * Do the dance, but don't use a conditional clear_bit(); it'd race with
8691 * other threads incrementing park_pending and setting the bit.
8692 */
37d1e2e3 8693 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
9e138a48
PB
8694 if (atomic_dec_return(&sqd->park_pending))
8695 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
09a6f4ef 8696 mutex_unlock(&sqd->lock);
37d1e2e3
JA
8697}
8698
86e0d676 8699static void io_sq_thread_park(struct io_sq_data *sqd)
09a6f4ef 8700 __acquires(&sqd->lock)
37d1e2e3 8701{
521d6a73
PB
8702 WARN_ON_ONCE(sqd->thread == current);
8703
9e138a48 8704 atomic_inc(&sqd->park_pending);
86e0d676 8705 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
09a6f4ef 8706 mutex_lock(&sqd->lock);
05962f95 8707 if (sqd->thread)
86e0d676 8708 wake_up_process(sqd->thread);
37d1e2e3
JA
8709}
8710
8711static void io_sq_thread_stop(struct io_sq_data *sqd)
8712{
521d6a73 8713 WARN_ON_ONCE(sqd->thread == current);
88885f66 8714 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
521d6a73 8715
05962f95 8716 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
88885f66 8717 mutex_lock(&sqd->lock);
e8f98f24
JA
8718 if (sqd->thread)
8719 wake_up_process(sqd->thread);
09a6f4ef 8720 mutex_unlock(&sqd->lock);
05962f95 8721 wait_for_completion(&sqd->exited);
37d1e2e3
JA
8722}
8723
534ca6d6 8724static void io_put_sq_data(struct io_sq_data *sqd)
6c271ce2 8725{
534ca6d6 8726 if (refcount_dec_and_test(&sqd->refs)) {
9e138a48
PB
8727 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
8728
37d1e2e3
JA
8729 io_sq_thread_stop(sqd);
8730 kfree(sqd);
8731 }
8732}
8733
8734static void io_sq_thread_finish(struct io_ring_ctx *ctx)
8735{
8736 struct io_sq_data *sqd = ctx->sq_data;
8737
8738 if (sqd) {
05962f95 8739 io_sq_thread_park(sqd);
521d6a73 8740 list_del_init(&ctx->sqd_list);
37d1e2e3 8741 io_sqd_update_thread_idle(sqd);
05962f95 8742 io_sq_thread_unpark(sqd);
37d1e2e3
JA
8743
8744 io_put_sq_data(sqd);
8745 ctx->sq_data = NULL;
534ca6d6
JA
8746 }
8747}
8748
aa06165d
JA
8749static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
8750{
8751 struct io_ring_ctx *ctx_attach;
8752 struct io_sq_data *sqd;
8753 struct fd f;
8754
8755 f = fdget(p->wq_fd);
8756 if (!f.file)
8757 return ERR_PTR(-ENXIO);
8758 if (f.file->f_op != &io_uring_fops) {
8759 fdput(f);
8760 return ERR_PTR(-EINVAL);
8761 }
8762
8763 ctx_attach = f.file->private_data;
8764 sqd = ctx_attach->sq_data;
8765 if (!sqd) {
8766 fdput(f);
8767 return ERR_PTR(-EINVAL);
8768 }
5c2469e0
JA
8769 if (sqd->task_tgid != current->tgid) {
8770 fdput(f);
8771 return ERR_PTR(-EPERM);
8772 }
aa06165d
JA
8773
8774 refcount_inc(&sqd->refs);
8775 fdput(f);
8776 return sqd;
8777}
8778
26984fbf
PB
8779static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
8780 bool *attached)
534ca6d6
JA
8781{
8782 struct io_sq_data *sqd;
8783
26984fbf 8784 *attached = false;
5c2469e0
JA
8785 if (p->flags & IORING_SETUP_ATTACH_WQ) {
8786 sqd = io_attach_sq_data(p);
26984fbf
PB
8787 if (!IS_ERR(sqd)) {
8788 *attached = true;
5c2469e0 8789 return sqd;
26984fbf 8790 }
5c2469e0
JA
8791 /* fall through for EPERM case, setup new sqd/task */
8792 if (PTR_ERR(sqd) != -EPERM)
8793 return sqd;
8794 }
aa06165d 8795
534ca6d6
JA
8796 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
8797 if (!sqd)
8798 return ERR_PTR(-ENOMEM);
8799
9e138a48 8800 atomic_set(&sqd->park_pending, 0);
534ca6d6 8801 refcount_set(&sqd->refs, 1);
69fb2131 8802 INIT_LIST_HEAD(&sqd->ctx_list);
09a6f4ef 8803 mutex_init(&sqd->lock);
534ca6d6 8804 init_waitqueue_head(&sqd->wait);
37d1e2e3 8805 init_completion(&sqd->exited);
534ca6d6
JA
8806 return sqd;
8807}
8808
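/*
 * How the attach path above is reached from userspace, sketched with
 * liburing: the second ring passes IORING_SETUP_ATTACH_WQ plus the fd of
 * the first ring, so both share one SQPOLL thread. Illustrative only;
 * error handling trimmed.
 */
#include <liburing.h>
#include <string.h>

static int setup_shared_sqpoll(struct io_uring *a, struct io_uring *b)
{
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL;
	p.sq_thread_idle = 1000;		/* ms before the thread naps */
	ret = io_uring_queue_init_params(64, a, &p);
	if (ret)
		return ret;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_ATTACH_WQ;
	p.wq_fd = a->ring_fd;			/* attach to the first ring */
	return io_uring_queue_init_params(64, b, &p);
}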
6b06314c
JA
8809/*
8810 * Ensure the UNIX gc is aware of our file set, so we are certain that
8811 * the io_uring can be safely unregistered on process exit, even if we have
1f59bc0f
PB
8812 * loops in the file references. We account only files that can hold other
8813 * files because otherwise they can't form a loop and so are not interesting
8814 * for GC.
6b06314c 8815 */
8b3171bd 8816static int io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
6b06314c 8817{
73b25d3b 8818#if defined(CONFIG_UNIX)
6b06314c 8819 struct sock *sk = ctx->ring_sock->sk;
73b25d3b 8820 struct sk_buff_head *head = &sk->sk_receive_queue;
6b06314c
JA
8821 struct scm_fp_list *fpl;
8822 struct sk_buff *skb;
6b06314c 8823
73b25d3b
PB
8824 if (likely(!io_file_need_scm(file)))
8825 return 0;
8826
8827 /*
8828 * See if we can merge this file into an existing skb SCM_RIGHTS
8829 * file set. If there's no room, fall back to allocating a new skb
8830 * and filling it in.
8831 */
8832 spin_lock_irq(&head->lock);
8833 skb = skb_peek(head);
8834 if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
8835 __skb_unlink(skb, head);
8836 else
8837 skb = NULL;
8838 spin_unlock_irq(&head->lock);
6b06314c 8839
6b06314c 8840 if (!skb) {
73b25d3b
PB
8841 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
8842 if (!fpl)
8843 return -ENOMEM;
6b06314c 8844
73b25d3b
PB
8845 skb = alloc_skb(0, GFP_KERNEL);
8846 if (!skb) {
8847 kfree(fpl);
8848 return -ENOMEM;
8849 }
6b06314c 8850
73b25d3b
PB
8851 fpl->user = get_uid(current_user());
8852 fpl->max = SCM_MAX_FD;
8853 fpl->count = 0;
dca58c6a 8854
73b25d3b
PB
8855 UNIXCB(skb).fp = fpl;
8856 skb->sk = sk;
8857 skb->destructor = unix_destruct_scm;
8858 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
8859 }
8860
8861 fpl = UNIXCB(skb).fp;
8862 fpl->fp[fpl->count++] = get_file(file);
8863 unix_inflight(fpl->user, file);
8864 skb_queue_head(head, skb);
dca58c6a 8865 fput(file);
73b25d3b 8866#endif
6b06314c
JA
8867 return 0;
8868}
6b06314c 8869
47e90392 8870static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
05f3fb3c 8871{
50238531 8872 struct file *file = prsrc->file;
05f3fb3c
JA
8873#if defined(CONFIG_UNIX)
8874 struct sock *sock = ctx->ring_sock->sk;
8875 struct sk_buff_head list, *head = &sock->sk_receive_queue;
8876 struct sk_buff *skb;
8877 int i;
8878
1f59bc0f
PB
8879 if (!io_file_need_scm(file)) {
8880 fput(file);
8881 return;
8882 }
8883
05f3fb3c
JA
8884 __skb_queue_head_init(&list);
8885
8886 /*
8887 * Find the skb that holds this file in its SCM_RIGHTS. When found,
8888 * remove this entry and rearrange the file array.
8889 */
8890 skb = skb_dequeue(head);
8891 while (skb) {
8892 struct scm_fp_list *fp;
8893
8894 fp = UNIXCB(skb).fp;
8895 for (i = 0; i < fp->count; i++) {
8896 int left;
8897
8898 if (fp->fp[i] != file)
8899 continue;
8900
8901 unix_notinflight(fp->user, fp->fp[i]);
8902 left = fp->count - 1 - i;
8903 if (left) {
8904 memmove(&fp->fp[i], &fp->fp[i + 1],
8905 left * sizeof(struct file *));
8906 }
8907 fp->count--;
8908 if (!fp->count) {
8909 kfree_skb(skb);
8910 skb = NULL;
8911 } else {
8912 __skb_queue_tail(&list, skb);
8913 }
8914 fput(file);
8915 file = NULL;
8916 break;
8917 }
8918
8919 if (!file)
8920 break;
8921
8922 __skb_queue_tail(&list, skb);
8923
8924 skb = skb_dequeue(head);
8925 }
8926
8927 if (skb_peek(&list)) {
8928 spin_lock_irq(&head->lock);
8929 while ((skb = __skb_dequeue(&list)) != NULL)
8930 __skb_queue_tail(head, skb);
8931 spin_unlock_irq(&head->lock);
8932 }
8933#else
8934 fput(file);
8935#endif
8936}
8937
b895c9a6 8938static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
65e19f54 8939{
b895c9a6 8940 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
269bbe5f
BM
8941 struct io_ring_ctx *ctx = rsrc_data->ctx;
8942 struct io_rsrc_put *prsrc, *tmp;
05589553 8943
269bbe5f
BM
8944 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
8945 list_del(&prsrc->list);
b60c8dce
PB
8946
8947 if (prsrc->tag) {
f8929630
PB
8948 if (ctx->flags & IORING_SETUP_IOPOLL)
8949 mutex_lock(&ctx->uring_lock);
b60c8dce 8950
79ebeaee 8951 spin_lock(&ctx->completion_lock);
913a571a 8952 io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
b60c8dce 8953 io_commit_cqring(ctx);
79ebeaee 8954 spin_unlock(&ctx->completion_lock);
b60c8dce 8955 io_cqring_ev_posted(ctx);
f8929630
PB
8956
8957 if (ctx->flags & IORING_SETUP_IOPOLL)
8958 mutex_unlock(&ctx->uring_lock);
b60c8dce
PB
8959 }
8960
40ae0ff7 8961 rsrc_data->do_put(ctx, prsrc);
269bbe5f 8962 kfree(prsrc);
65e19f54 8963 }
05589553 8964
28a9fe25 8965 io_rsrc_node_destroy(ref_node);
3e942498
PB
8966 if (atomic_dec_and_test(&rsrc_data->refs))
8967 complete(&rsrc_data->done);
2faf852d 8968}
65e19f54 8969
269bbe5f 8970static void io_rsrc_put_work(struct work_struct *work)
4a38aed2
JA
8971{
8972 struct io_ring_ctx *ctx;
8973 struct llist_node *node;
8974
269bbe5f
BM
8975 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
8976 node = llist_del_all(&ctx->rsrc_put_llist);
4a38aed2
JA
8977
8978 while (node) {
b895c9a6 8979 struct io_rsrc_node *ref_node;
4a38aed2
JA
8980 struct llist_node *next = node->next;
8981
b895c9a6 8982 ref_node = llist_entry(node, struct io_rsrc_node, llist);
269bbe5f 8983 __io_rsrc_put_work(ref_node);
4a38aed2
JA
8984 node = next;
8985 }
8986}
8987
6b06314c 8988static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
792e3582 8989 unsigned nr_args, u64 __user *tags)
6b06314c
JA
8990{
8991 __s32 __user *fds = (__s32 __user *) arg;
05f3fb3c 8992 struct file *file;
f3baed39 8993 int fd, ret;
846a4ef2 8994 unsigned i;
6b06314c 8995
05f3fb3c 8996 if (ctx->file_data)
6b06314c
JA
8997 return -EBUSY;
8998 if (!nr_args)
8999 return -EINVAL;
9000 if (nr_args > IORING_MAX_FIXED_FILES)
9001 return -EMFILE;
3a1b8a4e
PB
9002 if (nr_args > rlimit(RLIMIT_NOFILE))
9003 return -EMFILE;
a7f0ed5a 9004 ret = io_rsrc_node_switch_start(ctx);
f3baed39
PB
9005 if (ret)
9006 return ret;
d878c816
PB
9007 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
9008 &ctx->file_data);
9009 if (ret)
9010 return ret;
6b06314c 9011
a03a2a20
PB
9012 if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
9013 io_rsrc_data_free(ctx->file_data);
9014 ctx->file_data = NULL;
9015 return -ENOMEM;
9016 }
65e19f54 9017
08a45173 9018 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
a03a2a20
PB
9019 struct io_fixed_file *file_slot;
9020
d878c816 9021 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
600cf3f8 9022 ret = -EFAULT;
a03a2a20 9023 goto fail;
600cf3f8 9024 }
08a45173 9025 /* allow sparse sets */
792e3582
PB
9026 if (fd == -1) {
9027 ret = -EINVAL;
2d091d62 9028 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
a03a2a20 9029 goto fail;
08a45173 9030 continue;
792e3582 9031 }
6b06314c 9032
05f3fb3c 9033 file = fget(fd);
6b06314c 9034 ret = -EBADF;
792e3582 9035 if (unlikely(!file))
a03a2a20 9036 goto fail;
05f3fb3c 9037
6b06314c
JA
9038 /*
9039 * Don't allow io_uring instances to be registered. If UNIX
9040 * isn't enabled, then this causes a reference cycle and this
9041 * instance can never get freed. If UNIX is enabled we'll
9042 * handle it just fine, but there's still no point in allowing
9043 * a ring fd as it doesn't support regular read/write anyway.
9044 */
05f3fb3c
JA
9045 if (file->f_op == &io_uring_fops) {
9046 fput(file);
a03a2a20
PB
9047 goto fail;
9048 }
8b3171bd 9049 ret = io_scm_file_account(ctx, file);
a03a2a20 9050 if (ret) {
a03a2a20
PB
9051 fput(file);
9052 goto fail;
6b06314c 9053 }
e390510a
PB
9054 file_slot = io_fixed_file_slot(&ctx->file_table, i);
9055 io_fixed_file_set(file_slot, file);
05589553 9056 }
6b06314c 9057
a7f0ed5a 9058 io_rsrc_node_switch(ctx, NULL);
a03a2a20
PB
9059 return 0;
9060fail:
9061 __io_sqe_files_unregister(ctx);
6b06314c
JA
9062 return ret;
9063}
9064
9c7b0ba8
PB
9065static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
9066 struct io_rsrc_node *node, void *rsrc)
9067{
8f0a2480 9068 u64 *tag_slot = io_get_tag_slot(data, idx);
9c7b0ba8
PB
9069 struct io_rsrc_put *prsrc;
9070
9071 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
9072 if (!prsrc)
9073 return -ENOMEM;
9074
8f0a2480
PB
9075 prsrc->tag = *tag_slot;
9076 *tag_slot = 0;
9c7b0ba8
PB
9077 prsrc->rsrc = rsrc;
9078 list_add(&prsrc->list, &node->rsrc_list);
9079 return 0;
9080}
9081
b9445598
PB
9082static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
9083 unsigned int issue_flags, u32 slot_index)
9084{
9085 struct io_ring_ctx *ctx = req->ctx;
9c7b0ba8 9086 bool needs_switch = false;
b9445598
PB
9087 struct io_fixed_file *file_slot;
9088 int ret = -EBADF;
9089
f8929630 9090 io_ring_submit_lock(ctx, issue_flags);
b9445598
PB
9091 if (file->f_op == &io_uring_fops)
9092 goto err;
9093 ret = -ENXIO;
9094 if (!ctx->file_data)
9095 goto err;
9096 ret = -EINVAL;
9097 if (slot_index >= ctx->nr_user_files)
9098 goto err;
9099
9100 slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
9101 file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
9c7b0ba8
PB
9102
9103 if (file_slot->file_ptr) {
9104 struct file *old_file;
9105
9106 ret = io_rsrc_node_switch_start(ctx);
9107 if (ret)
9108 goto err;
9109
9110 old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
9111 ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
9112 ctx->rsrc_node, old_file);
9113 if (ret)
9114 goto err;
9115 file_slot->file_ptr = 0;
9116 needs_switch = true;
9117 }
b9445598 9118
8b3171bd 9119 ret = io_scm_file_account(ctx, file);
e390510a
PB
9120 if (!ret) {
9121 *io_get_tag_slot(ctx->file_data, slot_index) = 0;
9122 io_fixed_file_set(file_slot, file);
b9445598 9123 }
b9445598 9124err:
9c7b0ba8
PB
9125 if (needs_switch)
9126 io_rsrc_node_switch(ctx, ctx->file_data);
f8929630 9127 io_ring_submit_unlock(ctx, issue_flags);
b9445598
PB
9128 if (ret)
9129 fput(file);
9130 return ret;
9131}
9132
7df778be
PB
9133static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
9134{
9135 unsigned int offset = req->close.file_slot - 1;
9136 struct io_ring_ctx *ctx = req->ctx;
9137 struct io_fixed_file *file_slot;
9138 struct file *file;
4cdd158b 9139 int ret;
7df778be 9140
f8929630 9141 io_ring_submit_lock(ctx, issue_flags);
7df778be
PB
9142 ret = -ENXIO;
9143 if (unlikely(!ctx->file_data))
9144 goto out;
9145 ret = -EINVAL;
9146 if (offset >= ctx->nr_user_files)
9147 goto out;
9148 ret = io_rsrc_node_switch_start(ctx);
9149 if (ret)
9150 goto out;
9151
4cdd158b
PB
9152 offset = array_index_nospec(offset, ctx->nr_user_files);
9153 file_slot = io_fixed_file_slot(&ctx->file_table, offset);
7df778be
PB
9154 ret = -EBADF;
9155 if (!file_slot->file_ptr)
9156 goto out;
9157
9158 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
9159 ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
9160 if (ret)
9161 goto out;
9162
9163 file_slot->file_ptr = 0;
9164 io_rsrc_node_switch(ctx, ctx->file_data);
9165 ret = 0;
9166out:
f8929630 9167 io_ring_submit_unlock(ctx, issue_flags);
7df778be
PB
9168 return ret;
9169}
9170
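/*
 * These two helpers back the "direct" descriptor operations: opening
 * straight into a fixed-file slot and closing a slot without ever having a
 * regular fd. A userspace sketch assuming liburing's direct-open/close
 * prep helpers; the table must have been registered (possibly sparse)
 * beforehand, and NULL checks on the SQEs are omitted.
 */
#include <fcntl.h>
#include <liburing.h>

static int open_then_close_slot(struct io_uring *ring, unsigned slot)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_openat_direct(sqe, AT_FDCWD, "/dev/null", O_RDONLY,
				    0, slot);
	sqe->flags |= IOSQE_IO_LINK;	/* make the close wait for the open */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_close_direct(sqe, slot);

	return io_uring_submit(ring);
}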
05f3fb3c 9171static int __io_sqe_files_update(struct io_ring_ctx *ctx,
c3bdad02 9172 struct io_uring_rsrc_update2 *up,
05f3fb3c
JA
9173 unsigned nr_args)
9174{
c3bdad02 9175 u64 __user *tags = u64_to_user_ptr(up->tags);
98f0b3b4 9176 __s32 __user *fds = u64_to_user_ptr(up->data);
b895c9a6 9177 struct io_rsrc_data *data = ctx->file_data;
a04b0ac0
PB
9178 struct io_fixed_file *file_slot;
9179 struct file *file;
98f0b3b4
PB
9180 int fd, i, err = 0;
9181 unsigned int done;
05589553 9182 bool needs_switch = false;
c3a31e60 9183
98f0b3b4
PB
9184 if (!ctx->file_data)
9185 return -ENXIO;
9186 if (up->offset + nr_args > ctx->nr_user_files)
c3a31e60
JA
9187 return -EINVAL;
9188
67973b93 9189 for (done = 0; done < nr_args; done++) {
c3bdad02
PB
9190 u64 tag = 0;
9191
9192 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
9193 copy_from_user(&fd, &fds[done], sizeof(fd))) {
c3a31e60
JA
9194 err = -EFAULT;
9195 break;
9196 }
c3bdad02
PB
9197 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
9198 err = -EINVAL;
9199 break;
9200 }
4e0377a1 9201 if (fd == IORING_REGISTER_FILES_SKIP)
9202 continue;
9203
67973b93 9204 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
aeca241b 9205 file_slot = io_fixed_file_slot(&ctx->file_table, i);
ea64ec02 9206
a04b0ac0
PB
9207 if (file_slot->file_ptr) {
9208 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
4cdd158b 9209 err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
a5318d3c
HD
9210 if (err)
9211 break;
a04b0ac0 9212 file_slot->file_ptr = 0;
05589553 9213 needs_switch = true;
c3a31e60
JA
9214 }
9215 if (fd != -1) {
c3a31e60
JA
9216 file = fget(fd);
9217 if (!file) {
9218 err = -EBADF;
9219 break;
9220 }
9221 /*
9222 * Don't allow io_uring instances to be registered. If
9223 * UNIX isn't enabled, then this causes a reference
9224 * cycle and this instance can never get freed. If UNIX
9225 * is enabled we'll handle it just fine, but there's
9226 * still no point in allowing a ring fd as it doesn't
9227 * support regular read/write anyway.
9228 */
9229 if (file->f_op == &io_uring_fops) {
9230 fput(file);
9231 err = -EBADF;
9232 break;
9233 }
8b3171bd 9234 err = io_scm_file_account(ctx, file);
f3bd9dae
YY
9235 if (err) {
9236 fput(file);
c3a31e60 9237 break;
f3bd9dae 9238 }
e390510a
PB
9239 *io_get_tag_slot(data, i) = tag;
9240 io_fixed_file_set(file_slot, file);
c3a31e60 9241 }
05f3fb3c
JA
9242 }
9243
a7f0ed5a
PB
9244 if (needs_switch)
9245 io_rsrc_node_switch(ctx, data);
c3a31e60
JA
9246 return done ? done : err;
9247}
05589553 9248
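/*
 * Updating slots of a registered file table from userspace, sketched with
 * liburing: -1 clears a slot, IORING_REGISTER_FILES_SKIP leaves it
 * untouched, any other value installs that fd. Illustrative only.
 */
#include <liburing.h>

static int replace_slot_two(struct io_uring *ring, int new_fd)
{
	int fds[3] = { IORING_REGISTER_FILES_SKIP, -1, new_fd };

	/* starting at offset 0: keep slot 0, clear slot 1, set slot 2 */
	return io_uring_register_files_update(ring, 0, fds, 3);
}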
685fe7fe
JA
9249static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
9250 struct task_struct *task)
24369c2e 9251{
e941894e 9252 struct io_wq_hash *hash;
24369c2e 9253 struct io_wq_data data;
24369c2e 9254 unsigned int concurrency;
24369c2e 9255
362a9e65 9256 mutex_lock(&ctx->uring_lock);
e941894e
JA
9257 hash = ctx->hash_map;
9258 if (!hash) {
9259 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
362a9e65
YY
9260 if (!hash) {
9261 mutex_unlock(&ctx->uring_lock);
e941894e 9262 return ERR_PTR(-ENOMEM);
362a9e65 9263 }
e941894e
JA
9264 refcount_set(&hash->refs, 1);
9265 init_waitqueue_head(&hash->wait);
9266 ctx->hash_map = hash;
24369c2e 9267 }
362a9e65 9268 mutex_unlock(&ctx->uring_lock);
24369c2e 9269
e941894e 9270 data.hash = hash;
685fe7fe 9271 data.task = task;
ebc11b6c 9272 data.free_work = io_wq_free_work;
f5fa38c5 9273 data.do_work = io_wq_submit_work;
24369c2e 9274
d25e3a3d
JA
9275 /* Do QD, or 4 * CPUS, whichever is smaller */
9276 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
24369c2e 9277
5aa75ed5 9278 return io_wq_create(concurrency, &data);
24369c2e
PB
9279}
9280
c072481d
PB
9281static __cold int io_uring_alloc_task_context(struct task_struct *task,
9282 struct io_ring_ctx *ctx)
0f212204
JA
9283{
9284 struct io_uring_task *tctx;
d8a6df10 9285 int ret;
0f212204 9286
09899b19 9287 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
0f212204
JA
9288 if (unlikely(!tctx))
9289 return -ENOMEM;
9290
e7a6c00d
JA
9291 tctx->registered_rings = kcalloc(IO_RINGFD_REG_MAX,
9292 sizeof(struct file *), GFP_KERNEL);
9293 if (unlikely(!tctx->registered_rings)) {
9294 kfree(tctx);
9295 return -ENOMEM;
9296 }
9297
d8a6df10
JA
9298 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
9299 if (unlikely(ret)) {
e7a6c00d 9300 kfree(tctx->registered_rings);
d8a6df10
JA
9301 kfree(tctx);
9302 return ret;
9303 }
9304
685fe7fe 9305 tctx->io_wq = io_init_wq_offload(ctx, task);
5aa75ed5
JA
9306 if (IS_ERR(tctx->io_wq)) {
9307 ret = PTR_ERR(tctx->io_wq);
9308 percpu_counter_destroy(&tctx->inflight);
e7a6c00d 9309 kfree(tctx->registered_rings);
5aa75ed5
JA
9310 kfree(tctx);
9311 return ret;
9312 }
9313
0f212204
JA
9314 xa_init(&tctx->xa);
9315 init_waitqueue_head(&tctx->wait);
fdaf083c 9316 atomic_set(&tctx->in_idle, 0);
0f212204 9317 task->io_uring = tctx;
7cbf1722
JA
9318 spin_lock_init(&tctx->task_lock);
9319 INIT_WQ_LIST(&tctx->task_list);
4813c377 9320 INIT_WQ_LIST(&tctx->prior_task_list);
7cbf1722 9321 init_task_work(&tctx->task_work, tctx_task_work);
0f212204
JA
9322 return 0;
9323}
9324
9325void __io_uring_free(struct task_struct *tsk)
9326{
9327 struct io_uring_task *tctx = tsk->io_uring;
9328
9329 WARN_ON_ONCE(!xa_empty(&tctx->xa));
ef8eaa4e 9330 WARN_ON_ONCE(tctx->io_wq);
09899b19 9331 WARN_ON_ONCE(tctx->cached_refs);
ef8eaa4e 9332
e7a6c00d 9333 kfree(tctx->registered_rings);
d8a6df10 9334 percpu_counter_destroy(&tctx->inflight);
0f212204
JA
9335 kfree(tctx);
9336 tsk->io_uring = NULL;
9337}
9338
c072481d
PB
9339static __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
9340 struct io_uring_params *p)
2b188cc1
JA
9341{
9342 int ret;
9343
d25e3a3d
JA
9344 /* Retain compatibility with failing for an invalid attach attempt */
9345 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
9346 IORING_SETUP_ATTACH_WQ) {
9347 struct fd f;
9348
9349 f = fdget(p->wq_fd);
9350 if (!f.file)
9351 return -ENXIO;
0cc936f7
JA
9352 if (f.file->f_op != &io_uring_fops) {
9353 fdput(f);
f2a48dd0 9354 return -EINVAL;
0cc936f7
JA
9355 }
9356 fdput(f);
d25e3a3d 9357 }
6c271ce2 9358 if (ctx->flags & IORING_SETUP_SQPOLL) {
46fe18b1 9359 struct task_struct *tsk;
534ca6d6 9360 struct io_sq_data *sqd;
26984fbf 9361 bool attached;
534ca6d6 9362
cdc1404a
PM
9363 ret = security_uring_sqpoll();
9364 if (ret)
9365 return ret;
9366
26984fbf 9367 sqd = io_get_sq_data(p, &attached);
534ca6d6
JA
9368 if (IS_ERR(sqd)) {
9369 ret = PTR_ERR(sqd);
9370 goto err;
9371 }
69fb2131 9372
7c30f36a 9373 ctx->sq_creds = get_current_cred();
534ca6d6 9374 ctx->sq_data = sqd;
917257da
JA
9375 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
9376 if (!ctx->sq_thread_idle)
9377 ctx->sq_thread_idle = HZ;
9378
78d7f6ba 9379 io_sq_thread_park(sqd);
de75a3d3
PB
9380 list_add(&ctx->sqd_list, &sqd->ctx_list);
9381 io_sqd_update_thread_idle(sqd);
26984fbf 9382 /* don't attach to a dying SQPOLL thread, would be racy */
f2a48dd0 9383 ret = (attached && !sqd->thread) ? -ENXIO : 0;
78d7f6ba
PB
9384 io_sq_thread_unpark(sqd);
9385
de75a3d3
PB
9386 if (ret < 0)
9387 goto err;
9388 if (attached)
5aa75ed5 9389 return 0;
aa06165d 9390
6c271ce2 9391 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 9392 int cpu = p->sq_thread_cpu;
6c271ce2 9393
917257da 9394 ret = -EINVAL;
f2a48dd0 9395 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
e8f98f24 9396 goto err_sqpoll;
37d1e2e3 9397 sqd->sq_cpu = cpu;
6c271ce2 9398 } else {
37d1e2e3 9399 sqd->sq_cpu = -1;
6c271ce2 9400 }
37d1e2e3
JA
9401
9402 sqd->task_pid = current->pid;
5c2469e0 9403 sqd->task_tgid = current->tgid;
46fe18b1
JA
9404 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
9405 if (IS_ERR(tsk)) {
9406 ret = PTR_ERR(tsk);
e8f98f24 9407 goto err_sqpoll;
6c271ce2 9408 }
97a73a0f 9409
46fe18b1 9410 sqd->thread = tsk;
97a73a0f 9411 ret = io_uring_alloc_task_context(tsk, ctx);
46fe18b1 9412 wake_up_new_task(tsk);
0f212204
JA
9413 if (ret)
9414 goto err;
6c271ce2
JA
9415 } else if (p->flags & IORING_SETUP_SQ_AFF) {
9416 /* Can't have SQ_AFF without SQPOLL */
9417 ret = -EINVAL;
9418 goto err;
9419 }
9420
2b188cc1 9421 return 0;
f2a48dd0
PB
9422err_sqpoll:
9423 complete(&ctx->sq_data->exited);
2b188cc1 9424err:
37d1e2e3 9425 io_sq_thread_finish(ctx);
2b188cc1
JA
9426 return ret;
9427}
9428
a087e2b5
BM
9429static inline void __io_unaccount_mem(struct user_struct *user,
9430 unsigned long nr_pages)
2b188cc1
JA
9431{
9432 atomic_long_sub(nr_pages, &user->locked_vm);
9433}
9434
a087e2b5
BM
9435static inline int __io_account_mem(struct user_struct *user,
9436 unsigned long nr_pages)
2b188cc1
JA
9437{
9438 unsigned long page_limit, cur_pages, new_pages;
9439
9440 /* Don't allow more pages than we can safely lock */
9441 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
9442
9443 do {
9444 cur_pages = atomic_long_read(&user->locked_vm);
9445 new_pages = cur_pages + nr_pages;
9446 if (new_pages > page_limit)
9447 return -ENOMEM;
9448 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
9449 new_pages) != cur_pages);
9450
9451 return 0;
9452}
9453
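/*
 * The accounting loop above is a standard lock-free "reserve up to a
 * limit" pattern. The same shape in portable C11 atomics, for reference
 * (userspace sketch, not kernel code):
 */
#include <errno.h>
#include <stdatomic.h>

static int account_pages(_Atomic unsigned long *locked_vm,
			 unsigned long nr_pages, unsigned long limit)
{
	unsigned long cur = atomic_load(locked_vm);
	unsigned long new;

	do {
		new = cur + nr_pages;
		if (new > limit)
			return -ENOMEM;
		/* on failure, cur is reloaded with the current value */
	} while (!atomic_compare_exchange_weak(locked_vm, &cur, new));

	return 0;
}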
26bfa89e 9454static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
a087e2b5 9455{
62e398be 9456 if (ctx->user)
a087e2b5 9457 __io_unaccount_mem(ctx->user, nr_pages);
30975825 9458
26bfa89e
JA
9459 if (ctx->mm_account)
9460 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
a087e2b5
BM
9461}
9462
26bfa89e 9463static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
a087e2b5 9464{
30975825
BM
9465 int ret;
9466
62e398be 9467 if (ctx->user) {
30975825
BM
9468 ret = __io_account_mem(ctx->user, nr_pages);
9469 if (ret)
9470 return ret;
9471 }
9472
26bfa89e
JA
9473 if (ctx->mm_account)
9474 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
a087e2b5
BM
9475
9476 return 0;
9477}
9478
2b188cc1
JA
9479static void io_mem_free(void *ptr)
9480{
52e04ef4
MR
9481 struct page *page;
9482
9483 if (!ptr)
9484 return;
2b188cc1 9485
52e04ef4 9486 page = virt_to_head_page(ptr);
2b188cc1
JA
9487 if (put_page_testzero(page))
9488 free_compound_page(page);
9489}
9490
9491static void *io_mem_alloc(size_t size)
9492{
0a3f1e0b 9493 gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
2b188cc1 9494
0a3f1e0b 9495 return (void *) __get_free_pages(gfp, get_order(size));
2b188cc1
JA
9496}
9497
75b28aff
HV
9498static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
9499 size_t *sq_offset)
9500{
9501 struct io_rings *rings;
9502 size_t off, sq_array_size;
9503
9504 off = struct_size(rings, cqes, cq_entries);
9505 if (off == SIZE_MAX)
9506 return SIZE_MAX;
9507
9508#ifdef CONFIG_SMP
9509 off = ALIGN(off, SMP_CACHE_BYTES);
9510 if (off == 0)
9511 return SIZE_MAX;
9512#endif
9513
b36200f5
DV
9514 if (sq_offset)
9515 *sq_offset = off;
9516
75b28aff
HV
9517 sq_array_size = array_size(sizeof(u32), sq_entries);
9518 if (sq_array_size == SIZE_MAX)
9519 return SIZE_MAX;
9520
9521 if (check_add_overflow(off, sq_array_size, &off))
9522 return SIZE_MAX;
9523
75b28aff
HV
9524 return off;
9525}
9526
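/*
 * rings_size() leans on overflow-checked arithmetic so a hostile
 * sq_entries/cq_entries pair cannot wrap the allocation size. The same
 * idiom in userspace C with the GCC/clang builtins (sketch only; the
 * function name is illustrative):
 */
#include <stddef.h>
#include <stdint.h>

static size_t checked_layout(size_t hdr, size_t entry, size_t nr)
{
	size_t array, total;

	if (__builtin_mul_overflow(entry, nr, &array))
		return SIZE_MAX;
	if (__builtin_add_overflow(hdr, array, &total))
		return SIZE_MAX;
	return total;
}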
41edf1a5 9527static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
7f61a1e9 9528{
41edf1a5 9529 struct io_mapped_ubuf *imu = *slot;
7f61a1e9
PB
9530 unsigned int i;
9531
6224843d
PB
9532 if (imu != ctx->dummy_ubuf) {
9533 for (i = 0; i < imu->nr_bvecs; i++)
9534 unpin_user_page(imu->bvec[i].bv_page);
9535 if (imu->acct_pages)
9536 io_unaccount_mem(ctx, imu->acct_pages);
9537 kvfree(imu);
9538 }
41edf1a5 9539 *slot = NULL;
7f61a1e9
PB
9540}
9541
bd54b6fe 9542static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
edafccee 9543{
634d00df
PB
9544 io_buffer_unmap(ctx, &prsrc->buf);
9545 prsrc->buf = NULL;
bd54b6fe 9546}
edafccee 9547
bd54b6fe
BM
9548static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
9549{
9550 unsigned int i;
edafccee 9551
7f61a1e9
PB
9552 for (i = 0; i < ctx->nr_user_bufs; i++)
9553 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
edafccee 9554 kfree(ctx->user_bufs);
bb6659cc 9555 io_rsrc_data_free(ctx->buf_data);
edafccee 9556 ctx->user_bufs = NULL;
bd54b6fe 9557 ctx->buf_data = NULL;
edafccee 9558 ctx->nr_user_bufs = 0;
bd54b6fe
BM
9559}
9560
0a96bbe4 9561static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
edafccee 9562{
bd54b6fe 9563 int ret;
edafccee 9564
bd54b6fe 9565 if (!ctx->buf_data)
edafccee
JA
9566 return -ENXIO;
9567
bd54b6fe
BM
9568 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
9569 if (!ret)
9570 __io_sqe_buffers_unregister(ctx);
9571 return ret;
edafccee
JA
9572}
9573
9574static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
9575 void __user *arg, unsigned index)
9576{
9577 struct iovec __user *src;
9578
9579#ifdef CONFIG_COMPAT
9580 if (ctx->compat) {
9581 struct compat_iovec __user *ciovs;
9582 struct compat_iovec ciov;
9583
9584 ciovs = (struct compat_iovec __user *) arg;
9585 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
9586 return -EFAULT;
9587
d55e5f5b 9588 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
9589 dst->iov_len = ciov.iov_len;
9590 return 0;
9591 }
9592#endif
9593 src = (struct iovec __user *) arg;
9594 if (copy_from_user(dst, &src[index], sizeof(*dst)))
9595 return -EFAULT;
9596 return 0;
9597}
9598
de293938
JA
9599/*
9600 * Not super efficient, but this only happens at registration time. And we do cache
9601 * the last compound head, so generally we'll only do a full search if we don't
9602 * match that one.
9603 *
9604 * We check if the given compound head page has already been accounted, to
9605 * avoid double accounting it. This allows us to account the full size of the
9606 * page, not just the constituent pages of a huge page.
9607 */
9608static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
9609 int nr_pages, struct page *hpage)
9610{
9611 int i, j;
9612
9613 /* check current page array */
9614 for (i = 0; i < nr_pages; i++) {
9615 if (!PageCompound(pages[i]))
9616 continue;
9617 if (compound_head(pages[i]) == hpage)
9618 return true;
9619 }
9620
9621 /* check previously registered pages */
9622 for (i = 0; i < ctx->nr_user_bufs; i++) {
41edf1a5 9623 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
de293938
JA
9624
9625 for (j = 0; j < imu->nr_bvecs; j++) {
9626 if (!PageCompound(imu->bvec[j].bv_page))
9627 continue;
9628 if (compound_head(imu->bvec[j].bv_page) == hpage)
9629 return true;
9630 }
9631 }
9632
9633 return false;
9634}
9635
9636static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
9637 int nr_pages, struct io_mapped_ubuf *imu,
9638 struct page **last_hpage)
9639{
9640 int i, ret;
9641
216e5835 9642 imu->acct_pages = 0;
de293938
JA
9643 for (i = 0; i < nr_pages; i++) {
9644 if (!PageCompound(pages[i])) {
9645 imu->acct_pages++;
9646 } else {
9647 struct page *hpage;
9648
9649 hpage = compound_head(pages[i]);
9650 if (hpage == *last_hpage)
9651 continue;
9652 *last_hpage = hpage;
9653 if (headpage_already_acct(ctx, pages, i, hpage))
9654 continue;
9655 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
9656 }
9657 }
9658
9659 if (!imu->acct_pages)
9660 return 0;
9661
26bfa89e 9662 ret = io_account_mem(ctx, imu->acct_pages);
de293938
JA
9663 if (ret)
9664 imu->acct_pages = 0;
9665 return ret;
9666}
9667
0a96bbe4 9668static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
41edf1a5 9669 struct io_mapped_ubuf **pimu,
0a96bbe4 9670 struct page **last_hpage)
edafccee 9671{
41edf1a5 9672 struct io_mapped_ubuf *imu = NULL;
edafccee
JA
9673 struct vm_area_struct **vmas = NULL;
9674 struct page **pages = NULL;
0a96bbe4
BM
9675 unsigned long off, start, end, ubuf;
9676 size_t size;
9677 int ret, pret, nr_pages, i;
9678
6224843d
PB
9679 if (!iov->iov_base) {
9680 *pimu = ctx->dummy_ubuf;
9681 return 0;
9682 }
9683
0a96bbe4
BM
9684 ubuf = (unsigned long) iov->iov_base;
9685 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
9686 start = ubuf >> PAGE_SHIFT;
9687 nr_pages = end - start;
9688
41edf1a5 9689 *pimu = NULL;
0a96bbe4
BM
9690 ret = -ENOMEM;
9691
9692 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
9693 if (!pages)
9694 goto done;
9695
9696 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
9697 GFP_KERNEL);
9698 if (!vmas)
9699 goto done;
edafccee 9700
41edf1a5 9701 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
a2b4198c 9702 if (!imu)
0a96bbe4
BM
9703 goto done;
9704
9705 ret = 0;
9706 mmap_read_lock(current->mm);
9707 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
9708 pages, vmas);
9709 if (pret == nr_pages) {
9710 /* don't support file backed memory */
9711 for (i = 0; i < nr_pages; i++) {
9712 struct vm_area_struct *vma = vmas[i];
9713
40dad765
PB
9714 if (vma_is_shmem(vma))
9715 continue;
0a96bbe4
BM
9716 if (vma->vm_file &&
9717 !is_file_hugepages(vma->vm_file)) {
9718 ret = -EOPNOTSUPP;
9719 break;
9720 }
9721 }
9722 } else {
9723 ret = pret < 0 ? pret : -EFAULT;
9724 }
9725 mmap_read_unlock(current->mm);
9726 if (ret) {
9727 /*
9728 * if we did a partial map, or found file-backed vmas,
9729 * release any pages we did get
9730 */
9731 if (pret > 0)
9732 unpin_user_pages(pages, pret);
0a96bbe4
BM
9733 goto done;
9734 }
9735
9736 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
9737 if (ret) {
9738 unpin_user_pages(pages, pret);
0a96bbe4
BM
9739 goto done;
9740 }
9741
9742 off = ubuf & ~PAGE_MASK;
9743 size = iov->iov_len;
9744 for (i = 0; i < nr_pages; i++) {
9745 size_t vec_len;
9746
9747 vec_len = min_t(size_t, size, PAGE_SIZE - off);
9748 imu->bvec[i].bv_page = pages[i];
9749 imu->bvec[i].bv_len = vec_len;
9750 imu->bvec[i].bv_offset = off;
9751 off = 0;
9752 size -= vec_len;
9753 }
9754 /* store original address for later verification */
9755 imu->ubuf = ubuf;
4751f53d 9756 imu->ubuf_end = ubuf + iov->iov_len;
0a96bbe4 9757 imu->nr_bvecs = nr_pages;
41edf1a5 9758 *pimu = imu;
0a96bbe4
BM
9759 ret = 0;
9760done:
41edf1a5
PB
9761 if (ret)
9762 kvfree(imu);
0a96bbe4
BM
9763 kvfree(pages);
9764 kvfree(vmas);
9765 return ret;
9766}
9767
2b358604 9768static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
0a96bbe4 9769{
87094465
PB
9770 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
9771 return ctx->user_bufs ? 0 : -ENOMEM;
2b358604 9772}
edafccee 9773
2b358604
BM
9774static int io_buffer_validate(struct iovec *iov)
9775{
50e96989
PB
9776 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
9777
2b358604
BM
9778 /*
9779 * Don't impose further size and buffer constraints here; we'll
9780 * return -EINVAL later, when the IO is submitted, if they turn
9781 * out to be wrong.
9782 */
6224843d
PB
9783 if (!iov->iov_base)
9784 return iov->iov_len ? -EFAULT : 0;
9785 if (!iov->iov_len)
2b358604 9786 return -EFAULT;
edafccee 9787
2b358604
BM
9788 /* arbitrary limit, but we need something */
9789 if (iov->iov_len > SZ_1G)
9790 return -EFAULT;
edafccee 9791
50e96989
PB
9792 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
9793 return -EOVERFLOW;
9794
2b358604
BM
9795 return 0;
9796}
edafccee 9797
2b358604 9798static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
634d00df 9799 unsigned int nr_args, u64 __user *tags)
2b358604 9800{
bd54b6fe
BM
9801 struct page *last_hpage = NULL;
9802 struct io_rsrc_data *data;
2b358604
BM
9803 int i, ret;
9804 struct iovec iov;
edafccee 9805
87094465
PB
9806 if (ctx->user_bufs)
9807 return -EBUSY;
489809e2 9808 if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
87094465 9809 return -EINVAL;
bd54b6fe 9810 ret = io_rsrc_node_switch_start(ctx);
2b358604
BM
9811 if (ret)
9812 return ret;
d878c816
PB
9813 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
9814 if (ret)
9815 return ret;
bd54b6fe
BM
9816 ret = io_buffers_map_alloc(ctx, nr_args);
9817 if (ret) {
bb6659cc 9818 io_rsrc_data_free(data);
bd54b6fe
BM
9819 return ret;
9820 }
edafccee 9821
87094465 9822 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
edafccee
JA
9823 ret = io_copy_iov(ctx, &iov, arg, i);
9824 if (ret)
0a96bbe4 9825 break;
2b358604
BM
9826 ret = io_buffer_validate(&iov);
9827 if (ret)
0a96bbe4 9828 break;
2d091d62 9829 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
cf3770e7
CIK
9830 ret = -EINVAL;
9831 break;
9832 }
edafccee 9833
41edf1a5
PB
9834 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
9835 &last_hpage);
0a96bbe4
BM
9836 if (ret)
9837 break;
edafccee 9838 }
0a96bbe4 9839
bd54b6fe 9840 WARN_ON_ONCE(ctx->buf_data);
0a96bbe4 9841
bd54b6fe
BM
9842 ctx->buf_data = data;
9843 if (ret)
9844 __io_sqe_buffers_unregister(ctx);
9845 else
9846 io_rsrc_node_switch(ctx, NULL);
edafccee
JA
9847 return ret;
9848}
9849
634d00df
PB
9850static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
9851 struct io_uring_rsrc_update2 *up,
9852 unsigned int nr_args)
9853{
9854 u64 __user *tags = u64_to_user_ptr(up->tags);
9855 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
634d00df
PB
9856 struct page *last_hpage = NULL;
9857 bool needs_switch = false;
9858 __u32 done;
9859 int i, err;
9860
9861 if (!ctx->buf_data)
9862 return -ENXIO;
9863 if (up->offset + nr_args > ctx->nr_user_bufs)
9864 return -EINVAL;
9865
9866 for (done = 0; done < nr_args; done++) {
0b8c0e7c
PB
9867 struct io_mapped_ubuf *imu;
9868 int offset = up->offset + done;
634d00df
PB
9869 u64 tag = 0;
9870
9871 err = io_copy_iov(ctx, &iov, iovs, done);
9872 if (err)
9873 break;
9874 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
9875 err = -EFAULT;
9876 break;
9877 }
0b8c0e7c
PB
9878 err = io_buffer_validate(&iov);
9879 if (err)
9880 break;
cf3770e7
CIK
9881 if (!iov.iov_base && tag) {
9882 err = -EINVAL;
9883 break;
9884 }
0b8c0e7c
PB
9885 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
9886 if (err)
9887 break;
634d00df 9888
0b8c0e7c 9889 i = array_index_nospec(offset, ctx->nr_user_bufs);
6224843d 9890 if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
4cdd158b 9891 err = io_queue_rsrc_removal(ctx->buf_data, i,
0b8c0e7c
PB
9892 ctx->rsrc_node, ctx->user_bufs[i]);
9893 if (unlikely(err)) {
9894 io_buffer_unmap(ctx, &imu);
634d00df 9895 break;
0b8c0e7c 9896 }
634d00df
PB
9897 ctx->user_bufs[i] = NULL;
9898 needs_switch = true;
9899 }
9900
0b8c0e7c 9901 ctx->user_bufs[i] = imu;
2d091d62 9902 *io_get_tag_slot(ctx->buf_data, offset) = tag;
634d00df
PB
9903 }
9904
9905 if (needs_switch)
9906 io_rsrc_node_switch(ctx, ctx->buf_data);
9907 return done ? done : err;
9908}
9909
c75312dd
UA
9910static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
9911 unsigned int eventfd_async)
9b402849 9912{
77bc59b4 9913 struct io_ev_fd *ev_fd;
9b402849 9914 __s32 __user *fds = arg;
f0a4e62b 9915 int fd;
9b402849 9916
77bc59b4
UA
9917 ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
9918 lockdep_is_held(&ctx->uring_lock));
9919 if (ev_fd)
9b402849
JA
9920 return -EBUSY;
9921
9922 if (copy_from_user(&fd, fds, sizeof(*fds)))
9923 return -EFAULT;
9924
77bc59b4
UA
9925 ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
9926 if (!ev_fd)
9927 return -ENOMEM;
fe7e3257 9928
77bc59b4
UA
9929 ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
9930 if (IS_ERR(ev_fd->cq_ev_fd)) {
f0a4e62b 9931 int ret = PTR_ERR(ev_fd->cq_ev_fd);
77bc59b4 9932 kfree(ev_fd);
9b402849
JA
9933 return ret;
9934 }
c75312dd 9935 ev_fd->eventfd_async = eventfd_async;
9aa8dfde 9936 ctx->has_evfd = true;
77bc59b4 9937 rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
f0a4e62b 9938 return 0;
77bc59b4
UA
9939}
9940
9941static void io_eventfd_put(struct rcu_head *rcu)
9942{
9943 struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
9944
9945 eventfd_ctx_put(ev_fd->cq_ev_fd);
9946 kfree(ev_fd);
9b402849
JA
9947}
9948
9949static int io_eventfd_unregister(struct io_ring_ctx *ctx)
9950{
77bc59b4
UA
9951 struct io_ev_fd *ev_fd;
9952
9953 ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
9954 lockdep_is_held(&ctx->uring_lock));
9955 if (ev_fd) {
9aa8dfde 9956 ctx->has_evfd = false;
77bc59b4
UA
9957 rcu_assign_pointer(ctx->io_ev_fd, NULL);
9958 call_rcu(&ev_fd->rcu, io_eventfd_put);
9b402849
JA
9959 return 0;
9960 }
9961
9962 return -ENXIO;
9963}
9964
5a2e745d
JA
9965static void io_destroy_buffers(struct io_ring_ctx *ctx)
9966{
dbc7d452
JA
9967 int i;
9968
9969 for (i = 0; i < (1U << IO_BUFFERS_HASH_BITS); i++) {
9970 struct list_head *list = &ctx->io_buffers[i];
9e15c3a0 9971
dbc7d452
JA
9972 while (!list_empty(list)) {
9973 struct io_buffer_list *bl;
9974
9975 bl = list_first_entry(list, struct io_buffer_list, list);
9976 __io_remove_buffers(ctx, bl, -1U);
9977 list_del(&bl->list);
9978 kfree(bl);
9979 }
9980 }
cc3cec83
JA
9981
9982 while (!list_empty(&ctx->io_buffers_pages)) {
9983 struct page *page;
9984
9985 page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
9986 list_del_init(&page->lru);
9987 __free_page(page);
9988 }
5a2e745d
JA
9989}
9990
4010fec4 9991static void io_req_caches_free(struct io_ring_ctx *ctx)
2b188cc1 9992{
cd0ca2e0 9993 struct io_submit_state *state = &ctx->submit_state;
37f0e767 9994 int nr = 0;
bf019da7 9995
9a4fdbd8 9996 mutex_lock(&ctx->uring_lock);
cd0ca2e0 9997 io_flush_cached_locked_reqs(ctx, state);
9a4fdbd8 9998
88ab95be 9999 while (!io_req_cache_empty(ctx)) {
c2b6c6bc
PB
10000 struct io_wq_work_node *node;
10001 struct io_kiocb *req;
9a4fdbd8 10002
c2b6c6bc
PB
10003 node = wq_stack_extract(&state->free_list);
10004 req = container_of(node, struct io_kiocb, comp_list);
10005 kmem_cache_free(req_cachep, req);
37f0e767 10006 nr++;
c2b6c6bc 10007 }
37f0e767
PB
10008 if (nr)
10009 percpu_ref_put_many(&ctx->refs, nr);
9a4fdbd8
JA
10010 mutex_unlock(&ctx->uring_lock);
10011}
10012
43597aac 10013static void io_wait_rsrc_data(struct io_rsrc_data *data)
2b188cc1 10014{
43597aac 10015 if (data && !atomic_dec_and_test(&data->refs))
bd54b6fe 10016 wait_for_completion(&data->done);
bd54b6fe 10017}
04fc6c80 10018
4d9237e3
JA
10019static void io_flush_apoll_cache(struct io_ring_ctx *ctx)
10020{
10021 struct async_poll *apoll;
10022
10023 while (!list_empty(&ctx->apoll_cache)) {
10024 apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
10025 poll.wait.entry);
10026 list_del(&apoll->poll.wait.entry);
10027 kfree(apoll);
10028 }
10029}
10030
c072481d 10031static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
2b188cc1 10032{
37d1e2e3 10033 io_sq_thread_finish(ctx);
2aede0e4 10034
37d1e2e3 10035 if (ctx->mm_account) {
2aede0e4
JA
10036 mmdrop(ctx->mm_account);
10037 ctx->mm_account = NULL;
30975825 10038 }
def596e9 10039
ab409402 10040 io_rsrc_refs_drop(ctx);
43597aac
PB
10041 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
10042 io_wait_rsrc_data(ctx->buf_data);
10043 io_wait_rsrc_data(ctx->file_data);
10044
8bad28d8 10045 mutex_lock(&ctx->uring_lock);
43597aac 10046 if (ctx->buf_data)
bd54b6fe 10047 __io_sqe_buffers_unregister(ctx);
43597aac 10048 if (ctx->file_data)
08480400 10049 __io_sqe_files_unregister(ctx);
c4ea060e
PB
10050 if (ctx->rings)
10051 __io_cqring_overflow_flush(ctx, true);
9b402849 10052 io_eventfd_unregister(ctx);
4d9237e3 10053 io_flush_apoll_cache(ctx);
77bc59b4 10054 mutex_unlock(&ctx->uring_lock);
5a2e745d 10055 io_destroy_buffers(ctx);
07db298a
PB
10056 if (ctx->sq_creds)
10057 put_cred(ctx->sq_creds);
def596e9 10058
a7f0ed5a
PB
10059 /* there are no registered resources left, nobody uses it */
10060 if (ctx->rsrc_node)
10061 io_rsrc_node_destroy(ctx->rsrc_node);
8dd03afe 10062 if (ctx->rsrc_backup_node)
b895c9a6 10063 io_rsrc_node_destroy(ctx->rsrc_backup_node);
a7f0ed5a 10064 flush_delayed_work(&ctx->rsrc_put_work);
756ab7c0 10065 flush_delayed_work(&ctx->fallback_work);
a7f0ed5a
PB
10066
10067 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
10068 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
def596e9 10069
2b188cc1 10070#if defined(CONFIG_UNIX)
355e8d26
EB
10071 if (ctx->ring_sock) {
10072 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 10073 sock_release(ctx->ring_sock);
355e8d26 10074 }
2b188cc1 10075#endif
ef9dd637 10076 WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
2b188cc1 10077
75b28aff 10078 io_mem_free(ctx->rings);
2b188cc1 10079 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
10080
10081 percpu_ref_exit(&ctx->refs);
2b188cc1 10082 free_uid(ctx->user);
4010fec4 10083 io_req_caches_free(ctx);
e941894e
JA
10084 if (ctx->hash_map)
10085 io_wq_put_hash(ctx->hash_map);
78076bb6 10086 kfree(ctx->cancel_hash);
6224843d 10087 kfree(ctx->dummy_ubuf);
dbc7d452 10088 kfree(ctx->io_buffers);
2b188cc1
JA
10089 kfree(ctx);
10090}
10091
10092static __poll_t io_uring_poll(struct file *file, poll_table *wait)
10093{
10094 struct io_ring_ctx *ctx = file->private_data;
10095 __poll_t mask = 0;
10096
d60aa65b 10097 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
10098 /*
10099 * synchronizes with barrier from wq_has_sleeper call in
10100 * io_commit_cqring
10101 */
2b188cc1 10102 smp_rmb();
90554200 10103 if (!io_sqring_full(ctx))
2b188cc1 10104 mask |= EPOLLOUT | EPOLLWRNORM;
ed670c3f
HX
10105
10106 /*
10107 * Don't flush cqring overflow list here, just do a simple check.
10108 * Otherwise there could possible be ABBA deadlock:
10109 * CPU0 CPU1
10110 * ---- ----
10111 * lock(&ctx->uring_lock);
10112 * lock(&ep->mtx);
10113 * lock(&ctx->uring_lock);
10114 * lock(&ep->mtx);
10115 *
10116 * Users may get EPOLLIN while seeing nothing in the cqring; this
10117 * pushes them to do the flush.
10118 */
10988a0a
DY
10119 if (io_cqring_events(ctx) ||
10120 test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
2b188cc1
JA
10121 mask |= EPOLLIN | EPOLLRDNORM;
10122
10123 return mask;
10124}
10125
0bead8cd 10126static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
071698e1 10127{
4379bf8b 10128 const struct cred *creds;
071698e1 10129
61cf9370 10130 creds = xa_erase(&ctx->personalities, id);
4379bf8b
JA
10131 if (creds) {
10132 put_cred(creds);
0bead8cd 10133 return 0;
1e6fa521 10134 }
0bead8cd
YD
10135
10136 return -EINVAL;
10137}
10138
d56d938b
PB
10139struct io_tctx_exit {
10140 struct callback_head task_work;
10141 struct completion completion;
baf186c4 10142 struct io_ring_ctx *ctx;
d56d938b
PB
10143};
10144
c072481d 10145static __cold void io_tctx_exit_cb(struct callback_head *cb)
d56d938b
PB
10146{
10147 struct io_uring_task *tctx = current->io_uring;
10148 struct io_tctx_exit *work;
10149
10150 work = container_of(cb, struct io_tctx_exit, task_work);
10151 /*
10152 * When @in_idle, we're in cancellation and it's racy to remove the
10153 * node. It'll be removed by the end of cancellation, just ignore it.
10154 */
10155 if (!atomic_read(&tctx->in_idle))
eef51daa 10156 io_uring_del_tctx_node((unsigned long)work->ctx);
d56d938b
PB
10157 complete(&work->completion);
10158}
10159
c072481d 10160static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
28090c13
PB
10161{
10162 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
10163
10164 return req->ctx == data;
10165}
10166
c072481d 10167static __cold void io_ring_exit_work(struct work_struct *work)
85faa7b8 10168{
d56d938b 10169 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
b5bb3a24 10170 unsigned long timeout = jiffies + HZ * 60 * 5;
58d3be2c 10171 unsigned long interval = HZ / 20;
d56d938b
PB
10172 struct io_tctx_exit exit;
10173 struct io_tctx_node *node;
10174 int ret;
85faa7b8 10175
56952e91
JA
10176 /*
10177 * If we're doing polled IO and end up having requests being
10178 * submitted async (out-of-line), then completions can come in while
10179 * we're waiting for refs to drop. We need to reap these manually,
10180 * as nobody else will be looking for them.
10181 */
b2edc0a7 10182 do {
3dd0c97a 10183 io_uring_try_cancel_requests(ctx, NULL, true);
28090c13
PB
10184 if (ctx->sq_data) {
10185 struct io_sq_data *sqd = ctx->sq_data;
10186 struct task_struct *tsk;
10187
10188 io_sq_thread_park(sqd);
10189 tsk = sqd->thread;
10190 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
10191 io_wq_cancel_cb(tsk->io_uring->io_wq,
10192 io_cancel_ctx_cb, ctx, true);
10193 io_sq_thread_unpark(sqd);
10194 }
b5bb3a24 10195
37f0e767
PB
10196 io_req_caches_free(ctx);
10197
58d3be2c
PB
10198 if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
10199 /* there is little hope left, don't run it too often */
10200 interval = HZ * 60;
10201 }
10202 } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
d56d938b 10203
7f00651a
PB
10204 init_completion(&exit.completion);
10205 init_task_work(&exit.task_work, io_tctx_exit_cb);
10206 exit.ctx = ctx;
89b5066e
PB
10207 /*
10208 * Some may use the context even when all refs and requests have been put,
10209 * and they are free to do so while still holding uring_lock or
5b0a6acc 10210 * completion_lock, see io_req_task_submit(). Apart from other work,
89b5066e
PB
10211 * this lock/unlock section also waits for them to finish.
10212 */
d56d938b
PB
10213 mutex_lock(&ctx->uring_lock);
10214 while (!list_empty(&ctx->tctx_list)) {
b5bb3a24
PB
10215 WARN_ON_ONCE(time_after(jiffies, timeout));
10216
d56d938b
PB
10217 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
10218 ctx_node);
7f00651a
PB
10219 /* don't spin on a single task if cancellation failed */
10220 list_rotate_left(&ctx->tctx_list);
d56d938b
PB
10221 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
10222 if (WARN_ON_ONCE(ret))
10223 continue;
d56d938b
PB
10224
10225 mutex_unlock(&ctx->uring_lock);
10226 wait_for_completion(&exit.completion);
d56d938b
PB
10227 mutex_lock(&ctx->uring_lock);
10228 }
10229 mutex_unlock(&ctx->uring_lock);
79ebeaee
JA
10230 spin_lock(&ctx->completion_lock);
10231 spin_unlock(&ctx->completion_lock);
d56d938b 10232
85faa7b8
JA
10233 io_ring_ctx_free(ctx);
10234}
10235
80c4cbdb 10236/* Returns true if we found and killed one or more timeouts */
c072481d
PB
10237static __cold bool io_kill_timeouts(struct io_ring_ctx *ctx,
10238 struct task_struct *tsk, bool cancel_all)
80c4cbdb
PB
10239{
10240 struct io_kiocb *req, *tmp;
10241 int canceled = 0;
10242
79ebeaee
JA
10243 spin_lock(&ctx->completion_lock);
10244 spin_lock_irq(&ctx->timeout_lock);
80c4cbdb 10245 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
3dd0c97a 10246 if (io_match_task(req, tsk, cancel_all)) {
80c4cbdb
PB
10247 io_kill_timeout(req, -ECANCELED);
10248 canceled++;
10249 }
10250 }
79ebeaee 10251 spin_unlock_irq(&ctx->timeout_lock);
60053be8 10252 io_commit_cqring(ctx);
79ebeaee 10253 spin_unlock(&ctx->completion_lock);
80c4cbdb
PB
10254 if (canceled != 0)
10255 io_cqring_ev_posted(ctx);
10256 return canceled != 0;
10257}
10258
c072481d 10259static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
2b188cc1 10260{
61cf9370
MWO
10261 unsigned long index;
10262 struct creds *creds;
10263
2b188cc1
JA
10264 mutex_lock(&ctx->uring_lock);
10265 percpu_ref_kill(&ctx->refs);
634578f8 10266 if (ctx->rings)
6c2450ae 10267 __io_cqring_overflow_flush(ctx, true);
61cf9370
MWO
10268 xa_for_each(&ctx->personalities, index, creds)
10269 io_unregister_personality(ctx, index);
2b188cc1
JA
10270 mutex_unlock(&ctx->uring_lock);
10271
60053be8
PB
10272 /* failed during ring init, it couldn't have issued any requests */
10273 if (ctx->rings) {
10274 io_kill_timeouts(ctx, NULL, true);
10275 io_poll_remove_all(ctx, NULL, true);
10276 /* if we failed setting up the ctx, we might not have any rings */
10277 io_iopoll_try_reap_events(ctx);
10278 }
309fc03a 10279
85faa7b8 10280 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
fc666777
JA
10281 /*
10282 * Use system_unbound_wq to avoid spawning tons of event kworkers
10283 * if we're exiting a ton of rings at the same time. It just adds
10284 * noise and overhead; there's no discernible change in runtime
10285 * over using system_wq.
10286 */
10287 queue_work(system_unbound_wq, &ctx->exit_work);
2b188cc1
JA
10288}
10289
10290static int io_uring_release(struct inode *inode, struct file *file)
10291{
10292 struct io_ring_ctx *ctx = file->private_data;
10293
10294 file->private_data = NULL;
10295 io_ring_ctx_wait_and_kill(ctx);
10296 return 0;
10297}
10298
f6edbabb
PB
10299struct io_task_cancel {
10300 struct task_struct *task;
3dd0c97a 10301 bool all;
f6edbabb 10302};
f254ac04 10303
f6edbabb 10304static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
b711d4ea 10305{
9a472ef7 10306 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
f6edbabb 10307 struct io_task_cancel *cancel = data;
9a472ef7 10308
6af3f48b 10309 return io_match_task_safe(req, cancel->task, cancel->all);
b711d4ea
JA
10310}
10311
c072481d
PB
10312static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
10313 struct task_struct *task,
10314 bool cancel_all)
b7ddce3c 10315{
e1915f76 10316 struct io_defer_entry *de;
b7ddce3c
PB
10317 LIST_HEAD(list);
10318
79ebeaee 10319 spin_lock(&ctx->completion_lock);
b7ddce3c 10320 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
6af3f48b 10321 if (io_match_task_safe(de->req, task, cancel_all)) {
b7ddce3c
PB
10322 list_cut_position(&list, &ctx->defer_list, &de->list);
10323 break;
10324 }
10325 }
79ebeaee 10326 spin_unlock(&ctx->completion_lock);
e1915f76
PB
10327 if (list_empty(&list))
10328 return false;
b7ddce3c
PB
10329
10330 while (!list_empty(&list)) {
10331 de = list_first_entry(&list, struct io_defer_entry, list);
10332 list_del_init(&de->list);
f41db273 10333 io_req_complete_failed(de->req, -ECANCELED);
b7ddce3c
PB
10334 kfree(de);
10335 }
e1915f76 10336 return true;
b7ddce3c
PB
10337}
10338
c072481d 10339static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
1b00764f
PB
10340{
10341 struct io_tctx_node *node;
10342 enum io_wq_cancel cret;
10343 bool ret = false;
10344
10345 mutex_lock(&ctx->uring_lock);
10346 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
10347 struct io_uring_task *tctx = node->task->io_uring;
10348
10349 /*
10350 * io_wq will stay alive while we hold uring_lock, because it's
10351 * killed after ctx nodes, which requires taking the lock.
10352 */
10353 if (!tctx || !tctx->io_wq)
10354 continue;
10355 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
10356 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
10357 }
10358 mutex_unlock(&ctx->uring_lock);
10359
10360 return ret;
10361}
10362
c072481d
PB
10363static __cold void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
10364 struct task_struct *task,
10365 bool cancel_all)
9936c7c2 10366{
3dd0c97a 10367 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
1b00764f 10368 struct io_uring_task *tctx = task ? task->io_uring : NULL;
9936c7c2 10369
60053be8
PB
10370 /* failed during ring init, it couldn't have issued any requests */
10371 if (!ctx->rings)
10372 return;
10373
9936c7c2
PB
10374 while (1) {
10375 enum io_wq_cancel cret;
10376 bool ret = false;
10377
1b00764f
PB
10378 if (!task) {
10379 ret |= io_uring_try_cancel_iowq(ctx);
10380 } else if (tctx && tctx->io_wq) {
10381 /*
10382 * Cancels requests of all rings, not only @ctx, but
10383 * it's fine as the task is in exit/exec.
10384 */
5aa75ed5 10385 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
9936c7c2
PB
10386 &cancel, true);
10387 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
10388 }
10389
10390 /* SQPOLL thread does its own polling */
3dd0c97a 10391 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
d052d1d6 10392 (ctx->sq_data && ctx->sq_data->thread == current)) {
5eef4e87 10393 while (!wq_list_empty(&ctx->iopoll_list)) {
9936c7c2
PB
10394 io_iopoll_try_reap_events(ctx);
10395 ret = true;
10396 }
10397 }
10398
3dd0c97a
PB
10399 ret |= io_cancel_defer_files(ctx, task, cancel_all);
10400 ret |= io_poll_remove_all(ctx, task, cancel_all);
10401 ret |= io_kill_timeouts(ctx, task, cancel_all);
e5dc480d
PB
10402 if (task)
10403 ret |= io_run_task_work();
9936c7c2
PB
10404 if (!ret)
10405 break;
10406 cond_resched();
10407 }
10408}
10409
eef51daa 10410static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
0f212204 10411{
236434c3 10412 struct io_uring_task *tctx = current->io_uring;
13bf43f5 10413 struct io_tctx_node *node;
a528b04e 10414 int ret;
236434c3
MWO
10415
10416 if (unlikely(!tctx)) {
5aa75ed5 10417 ret = io_uring_alloc_task_context(current, ctx);
0f212204
JA
10418 if (unlikely(ret))
10419 return ret;
e139a1ec 10420
236434c3 10421 tctx = current->io_uring;
e139a1ec
PB
10422 if (ctx->iowq_limits_set) {
10423 unsigned int limits[2] = { ctx->iowq_limits[0],
10424 ctx->iowq_limits[1], };
10425
10426 ret = io_wq_max_workers(tctx->io_wq, limits);
10427 if (ret)
10428 return ret;
10429 }
0f212204 10430 }
cf27f3b1
PB
10431 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
10432 node = kmalloc(sizeof(*node), GFP_KERNEL);
10433 if (!node)
10434 return -ENOMEM;
10435 node->ctx = ctx;
10436 node->task = current;
13bf43f5 10437
cf27f3b1
PB
10438 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
10439 node, GFP_KERNEL));
10440 if (ret) {
10441 kfree(node);
10442 return ret;
0f212204 10443 }
cf27f3b1
PB
10444
10445 mutex_lock(&ctx->uring_lock);
10446 list_add(&node->ctx_node, &ctx->tctx_list);
10447 mutex_unlock(&ctx->uring_lock);
0f212204 10448 }
cf27f3b1 10449 tctx->last = ctx;
0f212204
JA
10450 return 0;
10451}
10452
cf27f3b1
PB
10453/*
10454 * Note that this task has used io_uring. We use it for cancelation purposes.
10455 */
eef51daa 10456static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
cf27f3b1
PB
10457{
10458 struct io_uring_task *tctx = current->io_uring;
10459
10460 if (likely(tctx && tctx->last == ctx))
10461 return 0;
eef51daa 10462 return __io_uring_add_tctx_node(ctx);
cf27f3b1
PB
10463}
10464
0f212204
JA
10465/*
10466 * Remove this io_uring_file -> task mapping.
10467 */
c072481d 10468static __cold void io_uring_del_tctx_node(unsigned long index)
0f212204
JA
10469{
10470 struct io_uring_task *tctx = current->io_uring;
13bf43f5 10471 struct io_tctx_node *node;
2941267b 10472
eebd2e37
PB
10473 if (!tctx)
10474 return;
13bf43f5
PB
10475 node = xa_erase(&tctx->xa, index);
10476 if (!node)
2941267b 10477 return;
0f212204 10478
13bf43f5
PB
10479 WARN_ON_ONCE(current != node->task);
10480 WARN_ON_ONCE(list_empty(&node->ctx_node));
10481
10482 mutex_lock(&node->ctx->uring_lock);
10483 list_del(&node->ctx_node);
10484 mutex_unlock(&node->ctx->uring_lock);
10485
baf186c4 10486 if (tctx->last == node->ctx)
0f212204 10487 tctx->last = NULL;
13bf43f5 10488 kfree(node);
0f212204
JA
10489}
10490
c072481d 10491static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
de7f1d9e 10492{
ba5ef6dc 10493 struct io_wq *wq = tctx->io_wq;
13bf43f5 10494 struct io_tctx_node *node;
de7f1d9e
PB
10495 unsigned long index;
10496
8bab4c09 10497 xa_for_each(&tctx->xa, index, node) {
eef51daa 10498 io_uring_del_tctx_node(index);
8bab4c09
JA
10499 cond_resched();
10500 }
b16ef427
ME
10501 if (wq) {
10502 /*
f6f9b278 10503 * Must be after io_uring_del_tctx_node() (removes nodes under
b16ef427
ME
10504 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
10505 */
ba5ef6dc 10506 io_wq_put_and_exit(wq);
dadebc35 10507 tctx->io_wq = NULL;
b16ef427 10508 }
de7f1d9e
PB
10509}
10510
3f48cf18 10511static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
521d6a73 10512{
3f48cf18 10513 if (tracked)
d5361233 10514 return 0;
521d6a73
PB
10515 return percpu_counter_sum(&tctx->inflight);
10516}
10517
78cc687b
PB
10518/*
10519 * Find any io_uring ctx that this task has registered or done IO on, and cancel
78a78060 10520 * requests. @sqd must be non-NULL iff this is an SQPOLL thread cancellation.
78cc687b 10521 */
c072481d
PB
10522static __cold void io_uring_cancel_generic(bool cancel_all,
10523 struct io_sq_data *sqd)
0e9ddb39 10524{
521d6a73 10525 struct io_uring_task *tctx = current->io_uring;
734551df 10526 struct io_ring_ctx *ctx;
0e9ddb39
PB
10527 s64 inflight;
10528 DEFINE_WAIT(wait);
fdaf083c 10529
78cc687b
PB
10530 WARN_ON_ONCE(sqd && sqd->thread != current);
10531
6d042ffb
PO
10532 if (!current->io_uring)
10533 return;
17a91051
PB
10534 if (tctx->io_wq)
10535 io_wq_exit_start(tctx->io_wq);
10536
0e9ddb39
PB
10537 atomic_inc(&tctx->in_idle);
10538 do {
e9dbe221 10539 io_uring_drop_tctx_refs(current);
0e9ddb39 10540 /* read completions before cancelations */
78cc687b 10541 inflight = tctx_inflight(tctx, !cancel_all);
0e9ddb39
PB
10542 if (!inflight)
10543 break;
fdaf083c 10544
78cc687b
PB
10545 if (!sqd) {
10546 struct io_tctx_node *node;
10547 unsigned long index;
0f212204 10548
78cc687b
PB
10549 xa_for_each(&tctx->xa, index, node) {
10550 /* sqpoll task will cancel all its requests */
10551 if (node->ctx->sq_data)
10552 continue;
10553 io_uring_try_cancel_requests(node->ctx, current,
10554 cancel_all);
10555 }
10556 } else {
10557 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
10558 io_uring_try_cancel_requests(ctx, current,
10559 cancel_all);
10560 }
17a91051 10561
78a78060
JA
10562 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
10563 io_run_task_work();
e9dbe221 10564 io_uring_drop_tctx_refs(current);
78a78060 10565
0f212204 10566 /*
a1bb3cd5
PB
10567 * If we've seen completions, retry without waiting. This
10568 * avoids a race where a completion comes in before we did
10569 * prepare_to_wait().
0f212204 10570 */
3dd0c97a 10571 if (inflight == tctx_inflight(tctx, !cancel_all))
a1bb3cd5 10572 schedule();
f57555ed 10573 finish_wait(&tctx->wait, &wait);
d8a6df10 10574 } while (1);
de7f1d9e 10575
8452d4a6 10576 io_uring_clean_tctx(tctx);
3dd0c97a 10577 if (cancel_all) {
3cc7fdb9
PB
10578 /*
10579 * We shouldn't run task_works after cancel, so just leave
10580 * ->in_idle set for normal exit.
10581 */
10582 atomic_dec(&tctx->in_idle);
3f48cf18
PB
10583 /* for exec all current's requests should be gone, kill tctx */
10584 __io_uring_free(current);
10585 }
44e728b8
PB
10586}
10587
f552a27a 10588void __io_uring_cancel(bool cancel_all)
78cc687b 10589{
f552a27a 10590 io_uring_cancel_generic(cancel_all, NULL);
78cc687b
PB
10591}
10592
e7a6c00d
JA
10593void io_uring_unreg_ringfd(void)
10594{
10595 struct io_uring_task *tctx = current->io_uring;
10596 int i;
10597
10598 for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
10599 if (tctx->registered_rings[i]) {
10600 fput(tctx->registered_rings[i]);
10601 tctx->registered_rings[i] = NULL;
10602 }
10603 }
10604}
10605
10606static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
10607 int start, int end)
10608{
10609 struct file *file;
10610 int offset;
10611
10612 for (offset = start; offset < end; offset++) {
10613 offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
10614 if (tctx->registered_rings[offset])
10615 continue;
10616
10617 file = fget(fd);
10618 if (!file) {
10619 return -EBADF;
10620 } else if (file->f_op != &io_uring_fops) {
10621 fput(file);
10622 return -EOPNOTSUPP;
10623 }
10624 tctx->registered_rings[offset] = file;
10625 return offset;
10626 }
10627
10628 return -EBUSY;
10629}
10630
10631/*
10632 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
10633 * invocation. User passes in an array of struct io_uring_rsrc_update
10634 * with ->data set to the ring_fd, and ->offset given for the desired
10635 * index. If no index is desired, application may set ->offset == -1U
10636 * and we'll find an available index. Returns number of entries
10637 * successfully processed, or < 0 on error if none were processed.
10638 */
10639static int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
10640 unsigned nr_args)
10641{
10642 struct io_uring_rsrc_update __user *arg = __arg;
10643 struct io_uring_rsrc_update reg;
10644 struct io_uring_task *tctx;
10645 int ret, i;
10646
10647 if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
10648 return -EINVAL;
10649
10650 mutex_unlock(&ctx->uring_lock);
10651 ret = io_uring_add_tctx_node(ctx);
10652 mutex_lock(&ctx->uring_lock);
10653 if (ret)
10654 return ret;
10655
10656 tctx = current->io_uring;
10657 for (i = 0; i < nr_args; i++) {
10658 int start, end;
10659
10660 if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
10661 ret = -EFAULT;
10662 break;
10663 }
10664
6fb53cf8
DY
10665 if (reg.resv) {
10666 ret = -EINVAL;
10667 break;
10668 }
10669
e7a6c00d
JA
10670 if (reg.offset == -1U) {
10671 start = 0;
10672 end = IO_RINGFD_REG_MAX;
10673 } else {
10674 if (reg.offset >= IO_RINGFD_REG_MAX) {
10675 ret = -EINVAL;
10676 break;
10677 }
10678 start = reg.offset;
10679 end = start + 1;
10680 }
10681
10682 ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
10683 if (ret < 0)
10684 break;
10685
10686 reg.offset = ret;
10687 if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
10688 fput(tctx->registered_rings[reg.offset]);
10689 tctx->registered_rings[reg.offset] = NULL;
10690 ret = -EFAULT;
10691 break;
10692 }
10693 }
10694
10695 return i ? i : ret;
10696}
10697
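
As the comment above io_ringfd_register() describes, userspace passes an array of struct io_uring_rsrc_update with ->data holding the ring fd and ->offset either a fixed slot or -1U to let the kernel choose one; registration is per task. A hedged sketch of registering one ring fd and then entering via the registered index (raw syscalls; ring_fd is a placeholder, and the enter call assumes one SQE has already been queued):

/* Illustrative userspace sketch, not part of this file. */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int register_ring_fd(int ring_fd)
{
	struct io_uring_rsrc_update reg;

	memset(&reg, 0, sizeof(reg));
	reg.data = ring_fd;	/* fd to register */
	reg.offset = -1U;	/* let the kernel pick a free slot */

	/* returns the number of entries processed (1 here) on success */
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_RING_FDS, &reg, 1) != 1)
		return -1;

	/*
	 * reg.offset now holds the registered index; pass it instead of
	 * the real fd, with IORING_ENTER_REGISTERED_RING set.
	 */
	return syscall(__NR_io_uring_enter, reg.offset, 1, 0,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_REGISTERED_RING,
		       NULL, 0);
}
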
10698static int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
10699 unsigned nr_args)
10700{
10701 struct io_uring_rsrc_update __user *arg = __arg;
10702 struct io_uring_task *tctx = current->io_uring;
10703 struct io_uring_rsrc_update reg;
10704 int ret = 0, i;
10705
10706 if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
10707 return -EINVAL;
10708 if (!tctx)
10709 return 0;
10710
10711 for (i = 0; i < nr_args; i++) {
10712 if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
10713 ret = -EFAULT;
10714 break;
10715 }
6fb53cf8 10716 if (reg.resv || reg.offset >= IO_RINGFD_REG_MAX) {
e7a6c00d
JA
10717 ret = -EINVAL;
10718 break;
10719 }
10720
10721 reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
10722 if (tctx->registered_rings[reg.offset]) {
10723 fput(tctx->registered_rings[reg.offset]);
10724 tctx->registered_rings[reg.offset] = NULL;
10725 }
10726 }
10727
10728 return i ? i : ret;
10729}
10730
6c5c240e
RP
10731static void *io_uring_validate_mmap_request(struct file *file,
10732 loff_t pgoff, size_t sz)
2b188cc1 10733{
2b188cc1 10734 struct io_ring_ctx *ctx = file->private_data;
6c5c240e 10735 loff_t offset = pgoff << PAGE_SHIFT;
2b188cc1
JA
10736 struct page *page;
10737 void *ptr;
10738
10739 switch (offset) {
10740 case IORING_OFF_SQ_RING:
75b28aff
HV
10741 case IORING_OFF_CQ_RING:
10742 ptr = ctx->rings;
2b188cc1
JA
10743 break;
10744 case IORING_OFF_SQES:
10745 ptr = ctx->sq_sqes;
10746 break;
2b188cc1 10747 default:
6c5c240e 10748 return ERR_PTR(-EINVAL);
2b188cc1
JA
10749 }
10750
10751 page = virt_to_head_page(ptr);
a50b854e 10752 if (sz > page_size(page))
6c5c240e
RP
10753 return ERR_PTR(-EINVAL);
10754
10755 return ptr;
10756}
10757
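
io_uring_validate_mmap_request() above is the kernel half of the ring mmap; the userspace half maps the SQ/CQ rings and the SQE array at the fixed IORING_OFF_* offsets, using the offsets and counts reported in struct io_uring_params. A hedged sketch of the classic three-mapping layout (ring_fd is a placeholder; p is assumed to be the params structure filled in by io_uring_setup()):

/* Illustrative userspace sketch, not part of this file. */
#include <sys/mman.h>
#include <linux/io_uring.h>

static int map_rings(int ring_fd, struct io_uring_params *p,
		     void **sq_ring, void **cq_ring, void **sqes)
{
	/* SQ ring: head/tail, flags and the sq_array index table */
	*sq_ring = mmap(NULL, p->sq_off.array + p->sq_entries * sizeof(__u32),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			ring_fd, IORING_OFF_SQ_RING);
	/* CQ ring: head/tail plus the CQE array */
	*cq_ring = mmap(NULL, p->cq_off.cqes +
				p->cq_entries * sizeof(struct io_uring_cqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			ring_fd, IORING_OFF_CQ_RING);
	/* the SQE array is a separate mapping */
	*sqes = mmap(NULL, p->sq_entries * sizeof(struct io_uring_sqe),
		     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		     ring_fd, IORING_OFF_SQES);
	if (*sq_ring == MAP_FAILED || *cq_ring == MAP_FAILED ||
	    *sqes == MAP_FAILED)
		return -1;
	return 0;
}
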
10758#ifdef CONFIG_MMU
10759
c072481d 10760static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
6c5c240e
RP
10761{
10762 size_t sz = vma->vm_end - vma->vm_start;
10763 unsigned long pfn;
10764 void *ptr;
10765
10766 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
10767 if (IS_ERR(ptr))
10768 return PTR_ERR(ptr);
2b188cc1
JA
10769
10770 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
10771 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
10772}
10773
6c5c240e
RP
10774#else /* !CONFIG_MMU */
10775
10776static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
10777{
10778 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
10779}
10780
10781static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
10782{
10783 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
10784}
10785
10786static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
10787 unsigned long addr, unsigned long len,
10788 unsigned long pgoff, unsigned long flags)
10789{
10790 void *ptr;
10791
10792 ptr = io_uring_validate_mmap_request(file, pgoff, len);
10793 if (IS_ERR(ptr))
10794 return PTR_ERR(ptr);
10795
10796 return (unsigned long) ptr;
10797}
10798
10799#endif /* !CONFIG_MMU */
10800
d9d05217 10801static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
90554200
JA
10802{
10803 DEFINE_WAIT(wait);
10804
10805 do {
10806 if (!io_sqring_full(ctx))
10807 break;
90554200
JA
10808 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
10809
10810 if (!io_sqring_full(ctx))
10811 break;
90554200
JA
10812 schedule();
10813 } while (!signal_pending(current));
10814
10815 finish_wait(&ctx->sqo_sq_wait, &wait);
5199328a 10816 return 0;
90554200
JA
10817}
10818
f81440d3
PB
10819static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
10820{
10821 if (flags & IORING_ENTER_EXT_ARG) {
10822 struct io_uring_getevents_arg arg;
10823
10824 if (argsz != sizeof(arg))
10825 return -EINVAL;
10826 if (copy_from_user(&arg, argp, sizeof(arg)))
10827 return -EFAULT;
10828 }
10829 return 0;
10830}
10831
c73ebb68
HX
10832static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
10833 struct __kernel_timespec __user **ts,
10834 const sigset_t __user **sig)
10835{
10836 struct io_uring_getevents_arg arg;
10837
10838 /*
10839 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
10840 * is just a pointer to the sigset_t.
10841 */
10842 if (!(flags & IORING_ENTER_EXT_ARG)) {
10843 *sig = (const sigset_t __user *) argp;
10844 *ts = NULL;
10845 return 0;
10846 }
10847
10848 /*
10849 * EXT_ARG is set - ensure we agree on the size of it and copy in our
10850 * timespec and sigset_t pointers if good.
10851 */
10852 if (*argsz != sizeof(arg))
10853 return -EINVAL;
10854 if (copy_from_user(&arg, argp, sizeof(arg)))
10855 return -EFAULT;
d2347b96
DY
10856 if (arg.pad)
10857 return -EINVAL;
c73ebb68
HX
10858 *sig = u64_to_user_ptr(arg.sigmask);
10859 *argsz = arg.sigmask_sz;
10860 *ts = u64_to_user_ptr(arg.ts);
10861 return 0;
10862}
10863
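
io_get_ext_arg() above defines the IORING_ENTER_EXT_ARG contract: argp points at a struct io_uring_getevents_arg that carries the signal mask and an optional timeout, and the final size argument must be sizeof() that struct rather than a sigset size. A hedged userspace sketch of waiting with a timeout (raw syscall; ring_fd is a placeholder):

/* Illustrative userspace sketch, not part of this file. */
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

static int wait_cqes_timeout(int ring_fd, unsigned int want, long long ns)
{
	struct __kernel_timespec ts = {
		.tv_sec  = ns / 1000000000LL,
		.tv_nsec = ns % 1000000000LL,
	};
	struct io_uring_getevents_arg arg;

	memset(&arg, 0, sizeof(arg));	/* sigmask left 0: no mask change */
	arg.ts = (uint64_t)(uintptr_t)&ts;

	/* with EXT_ARG, the last argument is sizeof(arg), not a sigset size */
	return syscall(__NR_io_uring_enter, ring_fd, 0, want,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
		       &arg, sizeof(arg));
}
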
2b188cc1 10864SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
c73ebb68
HX
10865 u32, min_complete, u32, flags, const void __user *, argp,
10866 size_t, argsz)
2b188cc1
JA
10867{
10868 struct io_ring_ctx *ctx;
2b188cc1 10869 struct fd f;
33f993da 10870 long ret;
2b188cc1 10871
4c6e277c 10872 io_run_task_work();
b41e9852 10873
33f993da 10874 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
e7a6c00d
JA
10875 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
10876 IORING_ENTER_REGISTERED_RING)))
2b188cc1
JA
10877 return -EINVAL;
10878
e7a6c00d
JA
10879 /*
10880 * Ring fd has been registered via IORING_REGISTER_RING_FDS; we
10881 * need only dereference our task private array to find it.
10882 */
10883 if (flags & IORING_ENTER_REGISTERED_RING) {
10884 struct io_uring_task *tctx = current->io_uring;
10885
10886 if (!tctx || fd >= IO_RINGFD_REG_MAX)
10887 return -EINVAL;
10888 fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
10889 f.file = tctx->registered_rings[fd];
10890 if (unlikely(!f.file))
10891 return -EBADF;
10892 } else {
10893 f = fdget(fd);
10894 if (unlikely(!f.file))
10895 return -EBADF;
10896 }
2b188cc1
JA
10897
10898 ret = -EOPNOTSUPP;
33f993da 10899 if (unlikely(f.file->f_op != &io_uring_fops))
2b188cc1
JA
10900 goto out_fput;
10901
10902 ret = -ENXIO;
10903 ctx = f.file->private_data;
33f993da 10904 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
2b188cc1
JA
10905 goto out_fput;
10906
7e84e1c7 10907 ret = -EBADFD;
33f993da 10908 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
7e84e1c7
SG
10909 goto out;
10910
6c271ce2
JA
10911 /*
10912 * For SQ polling, the thread will do all submissions and completions.
10913 * Just return the requested submit count, and wake the thread if
10914 * we were asked to.
10915 */
b2a9eada 10916 ret = 0;
6c271ce2 10917 if (ctx->flags & IORING_SETUP_SQPOLL) {
90f67366 10918 io_cqring_overflow_flush(ctx);
89448c47 10919
21f96522
JA
10920 if (unlikely(ctx->sq_data->thread == NULL)) {
10921 ret = -EOWNERDEAD;
04147488 10922 goto out;
21f96522 10923 }
6c271ce2 10924 if (flags & IORING_ENTER_SQ_WAKEUP)
534ca6d6 10925 wake_up(&ctx->sq_data->wait);
d9d05217
PB
10926 if (flags & IORING_ENTER_SQ_WAIT) {
10927 ret = io_sqpoll_wait_sq(ctx);
10928 if (ret)
10929 goto out;
10930 }
3e813c90 10931 ret = to_submit;
b2a9eada 10932 } else if (to_submit) {
eef51daa 10933 ret = io_uring_add_tctx_node(ctx);
0f212204
JA
10934 if (unlikely(ret))
10935 goto out;
d487b43c 10936
2b188cc1 10937 mutex_lock(&ctx->uring_lock);
3e813c90
DY
10938 ret = io_submit_sqes(ctx, to_submit);
10939 if (ret != to_submit) {
d487b43c 10940 mutex_unlock(&ctx->uring_lock);
7c504e65 10941 goto out;
d487b43c
PB
10942 }
10943 if ((flags & IORING_ENTER_GETEVENTS) && ctx->syscall_iopoll)
10944 goto iopoll_locked;
10945 mutex_unlock(&ctx->uring_lock);
2b188cc1
JA
10946 }
10947 if (flags & IORING_ENTER_GETEVENTS) {
3e813c90 10948 int ret2;
773697b6 10949 if (ctx->syscall_iopoll) {
d487b43c
PB
10950 /*
10951 * We disallow the app entering submit/complete with
10952 * polling, but we still need to lock the ring to
10953 * prevent racing with polled issue that got punted to
10954 * a workqueue.
10955 */
10956 mutex_lock(&ctx->uring_lock);
10957iopoll_locked:
3e813c90
DY
10958 ret2 = io_validate_ext_arg(flags, argp, argsz);
10959 if (likely(!ret2)) {
10960 min_complete = min(min_complete,
10961 ctx->cq_entries);
10962 ret2 = io_iopoll_check(ctx, min_complete);
d487b43c
PB
10963 }
10964 mutex_unlock(&ctx->uring_lock);
def596e9 10965 } else {
f81440d3
PB
10966 const sigset_t __user *sig;
10967 struct __kernel_timespec __user *ts;
10968
3e813c90
DY
10969 ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
10970 if (likely(!ret2)) {
10971 min_complete = min(min_complete,
10972 ctx->cq_entries);
10973 ret2 = io_cqring_wait(ctx, min_complete, sig,
10974 argsz, ts);
10975 }
def596e9 10976 }
3e813c90 10977
155bc950 10978 if (!ret) {
3e813c90
DY
10979 ret = ret2;
10980
155bc950
DY
10981 /*
10982 * EBADR indicates that one or more CQE were dropped.
10983 * Once the user has been informed we can clear the bit
10984 * as they are obviously ok with those drops.
10985 */
10986 if (unlikely(ret2 == -EBADR))
10987 clear_bit(IO_CHECK_CQ_DROPPED_BIT,
10988 &ctx->check_cq);
10989 }
2b188cc1
JA
10990 }
10991
7c504e65 10992out:
6805b32e 10993 percpu_ref_put(&ctx->refs);
2b188cc1 10994out_fput:
e7a6c00d
JA
10995 if (!(flags & IORING_ENTER_REGISTERED_RING))
10996 fdput(f);
3e813c90 10997 return ret;
2b188cc1
JA
10998}
10999
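
The SQPOLL branch of io_uring_enter() above only needs userspace to wake the poller when it has gone idle: after publishing new SQ tail entries, the application samples the SQ ring flags and enters with IORING_ENTER_SQ_WAKEUP if IORING_SQ_NEED_WAKEUP is set. A hedged sketch (sq_flags is assumed to point at the flags word in the mapped SQ ring, i.e. sq ring base + p->sq_off.flags):

/* Illustrative userspace sketch, not part of this file. */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static void sqpoll_wake_if_needed(int ring_fd, const unsigned int *sq_flags)
{
	/*
	 * Full barrier so the store of the new SQ tail (done by the caller)
	 * is ordered before we sample the flags the poller thread sets.
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
	if (__atomic_load_n(sq_flags, __ATOMIC_RELAXED) & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}
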
bebdb65e 11000#ifdef CONFIG_PROC_FS
c072481d 11001static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
61cf9370 11002 const struct cred *cred)
87ce955b 11003{
87ce955b
JA
11004 struct user_namespace *uns = seq_user_ns(m);
11005 struct group_info *gi;
11006 kernel_cap_t cap;
11007 unsigned __capi;
11008 int g;
11009
11010 seq_printf(m, "%5d\n", id);
11011 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
11012 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
11013 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
11014 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
11015 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
11016 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
11017 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
11018 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
11019 seq_puts(m, "\n\tGroups:\t");
11020 gi = cred->group_info;
11021 for (g = 0; g < gi->ngroups; g++) {
11022 seq_put_decimal_ull(m, g ? " " : "",
11023 from_kgid_munged(uns, gi->gid[g]));
11024 }
11025 seq_puts(m, "\n\tCapEff:\t");
11026 cap = cred->cap_effective;
11027 CAP_FOR_EACH_U32(__capi)
11028 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
11029 seq_putc(m, '\n');
11030 return 0;
11031}
11032
c072481d
PB
11033static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
11034 struct seq_file *m)
87ce955b 11035{
dbbe9c64 11036 struct io_sq_data *sq = NULL;
83f84356
HX
11037 struct io_overflow_cqe *ocqe;
11038 struct io_rings *r = ctx->rings;
11039 unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
83f84356
HX
11040 unsigned int sq_head = READ_ONCE(r->sq.head);
11041 unsigned int sq_tail = READ_ONCE(r->sq.tail);
11042 unsigned int cq_head = READ_ONCE(r->cq.head);
11043 unsigned int cq_tail = READ_ONCE(r->cq.tail);
f75d1183 11044 unsigned int sq_entries, cq_entries;
fad8e0de 11045 bool has_lock;
83f84356
HX
11046 unsigned int i;
11047
11048 /*
11049 * we may get imprecise sqe and cqe info if the ring is actively running,
11050 * since we read cached_sq_head and cached_cq_tail without uring_lock,
11051 * and sq_tail and cq_head are changed by userspace. But that's ok, since
11052 * we usually use this info when the ring is stuck.
11053 */
c0235652 11054 seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
f75d1183
JA
11055 seq_printf(m, "SqHead:\t%u\n", sq_head);
11056 seq_printf(m, "SqTail:\t%u\n", sq_tail);
11057 seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
11058 seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
11059 seq_printf(m, "CqHead:\t%u\n", cq_head);
11060 seq_printf(m, "CqTail:\t%u\n", cq_tail);
11061 seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
11062 seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head);
11063 sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
11064 for (i = 0; i < sq_entries; i++) {
11065 unsigned int entry = i + sq_head;
11066 unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
a1957780 11067 struct io_uring_sqe *sqe;
f75d1183
JA
11068
11069 if (sq_idx > sq_mask)
11070 continue;
11071 sqe = &ctx->sq_sqes[sq_idx];
11072 seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n",
11073 sq_idx, sqe->opcode, sqe->fd, sqe->flags,
11074 sqe->user_data);
83f84356 11075 }
f75d1183
JA
11076 seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
11077 cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
11078 for (i = 0; i < cq_entries; i++) {
11079 unsigned int entry = i + cq_head;
11080 struct io_uring_cqe *cqe = &r->cqes[entry & cq_mask];
83f84356
HX
11081
11082 seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n",
f75d1183
JA
11083 entry & cq_mask, cqe->user_data, cqe->res,
11084 cqe->flags);
83f84356 11085 }
87ce955b 11086
fad8e0de
JA
11087 /*
11088 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
11089 * since the fdinfo case grabs it in the opposite direction of normal use
11090 * cases. If we fail to get the lock, we just don't iterate any
11091 * structures that could be going away outside the io_uring mutex.
11092 */
11093 has_lock = mutex_trylock(&ctx->uring_lock);
11094
5f3f26f9 11095 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
dbbe9c64 11096 sq = ctx->sq_data;
5f3f26f9
JA
11097 if (!sq->thread)
11098 sq = NULL;
11099 }
dbbe9c64
JQ
11100
11101 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
11102 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
87ce955b 11103 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
fad8e0de 11104 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
7b29f92d 11105 struct file *f = io_file_from_index(ctx, i);
87ce955b 11106
87ce955b
JA
11107 if (f)
11108 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
11109 else
11110 seq_printf(m, "%5u: <none>\n", i);
11111 }
11112 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
fad8e0de 11113 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
41edf1a5 11114 struct io_mapped_ubuf *buf = ctx->user_bufs[i];
4751f53d 11115 unsigned int len = buf->ubuf_end - buf->ubuf;
87ce955b 11116
4751f53d 11117 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
87ce955b 11118 }
61cf9370
MWO
11119 if (has_lock && !xa_empty(&ctx->personalities)) {
11120 unsigned long index;
11121 const struct cred *cred;
11122
87ce955b 11123 seq_printf(m, "Personalities:\n");
61cf9370
MWO
11124 xa_for_each(&ctx->personalities, index, cred)
11125 io_uring_show_cred(m, index, cred);
87ce955b 11126 }
83f84356
HX
11127 if (has_lock)
11128 mutex_unlock(&ctx->uring_lock);
11129
11130 seq_puts(m, "PollList:\n");
79ebeaee 11131 spin_lock(&ctx->completion_lock);
d7718a9d
JA
11132 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
11133 struct hlist_head *list = &ctx->cancel_hash[i];
11134 struct io_kiocb *req;
11135
11136 hlist_for_each_entry(req, list, hash_node)
11137 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
7f62d40d 11138 task_work_pending(req->task));
d7718a9d 11139 }
83f84356
HX
11140
11141 seq_puts(m, "CqOverflowList:\n");
11142 list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
11143 struct io_uring_cqe *cqe = &ocqe->cqe;
11144
11145 seq_printf(m, " user_data=%llu, res=%d, flags=%x\n",
11146 cqe->user_data, cqe->res, cqe->flags);
11147
11148 }
11149
79ebeaee 11150 spin_unlock(&ctx->completion_lock);
87ce955b
JA
11151}
11152
c072481d 11153static __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
87ce955b
JA
11154{
11155 struct io_ring_ctx *ctx = f->private_data;
11156
11157 if (percpu_ref_tryget(&ctx->refs)) {
11158 __io_uring_show_fdinfo(ctx, m);
11159 percpu_ref_put(&ctx->refs);
11160 }
11161}
bebdb65e 11162#endif
87ce955b 11163
2b188cc1
JA
11164static const struct file_operations io_uring_fops = {
11165 .release = io_uring_release,
11166 .mmap = io_uring_mmap,
6c5c240e
RP
11167#ifndef CONFIG_MMU
11168 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
11169 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
11170#endif
2b188cc1 11171 .poll = io_uring_poll,
bebdb65e 11172#ifdef CONFIG_PROC_FS
87ce955b 11173 .show_fdinfo = io_uring_show_fdinfo,
bebdb65e 11174#endif
2b188cc1
JA
11175};
11176
c072481d
PB
11177static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
11178 struct io_uring_params *p)
2b188cc1 11179{
75b28aff
HV
11180 struct io_rings *rings;
11181 size_t size, sq_array_offset;
2b188cc1 11182
bd740481
JA
11183 /* make sure these are sane, as we already accounted them */
11184 ctx->sq_entries = p->sq_entries;
11185 ctx->cq_entries = p->cq_entries;
11186
75b28aff
HV
11187 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
11188 if (size == SIZE_MAX)
11189 return -EOVERFLOW;
11190
11191 rings = io_mem_alloc(size);
11192 if (!rings)
2b188cc1
JA
11193 return -ENOMEM;
11194
75b28aff
HV
11195 ctx->rings = rings;
11196 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
11197 rings->sq_ring_mask = p->sq_entries - 1;
11198 rings->cq_ring_mask = p->cq_entries - 1;
11199 rings->sq_ring_entries = p->sq_entries;
11200 rings->cq_ring_entries = p->cq_entries;
2b188cc1
JA
11201
11202 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
eb065d30
JA
11203 if (size == SIZE_MAX) {
11204 io_mem_free(ctx->rings);
11205 ctx->rings = NULL;
2b188cc1 11206 return -EOVERFLOW;
eb065d30 11207 }
2b188cc1
JA
11208
11209 ctx->sq_sqes = io_mem_alloc(size);
eb065d30
JA
11210 if (!ctx->sq_sqes) {
11211 io_mem_free(ctx->rings);
11212 ctx->rings = NULL;
2b188cc1 11213 return -ENOMEM;
eb065d30 11214 }
2b188cc1 11215
2b188cc1
JA
11216 return 0;
11217}
11218
9faadcc8
PB
11219static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
11220{
11221 int ret, fd;
11222
11223 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
11224 if (fd < 0)
11225 return fd;
11226
eef51daa 11227 ret = io_uring_add_tctx_node(ctx);
9faadcc8
PB
11228 if (ret) {
11229 put_unused_fd(fd);
11230 return ret;
11231 }
11232 fd_install(fd, file);
11233 return fd;
11234}
11235
2b188cc1
JA
11236/*
11237 * Allocate an anonymous fd, this is what constitutes the application
11238 * visible backing of an io_uring instance. The application mmaps this
11239 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
11240 * we have to tie this fd to a socket for file garbage collection purposes.
11241 */
9faadcc8 11242static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
2b188cc1
JA
11243{
11244 struct file *file;
9faadcc8 11245#if defined(CONFIG_UNIX)
2b188cc1
JA
11246 int ret;
11247
2b188cc1
JA
11248 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
11249 &ctx->ring_sock);
11250 if (ret)
9faadcc8 11251 return ERR_PTR(ret);
2b188cc1
JA
11252#endif
11253
91a9ab7c
PM
11254 file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
11255 O_RDWR | O_CLOEXEC, NULL);
2b188cc1 11256#if defined(CONFIG_UNIX)
9faadcc8
PB
11257 if (IS_ERR(file)) {
11258 sock_release(ctx->ring_sock);
11259 ctx->ring_sock = NULL;
11260 } else {
11261 ctx->ring_sock->file = file;
0f212204 11262 }
2b188cc1 11263#endif
9faadcc8 11264 return file;
2b188cc1
JA
11265}
11266
c072481d
PB
11267static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
11268 struct io_uring_params __user *params)
2b188cc1 11269{
2b188cc1 11270 struct io_ring_ctx *ctx;
9faadcc8 11271 struct file *file;
2b188cc1
JA
11272 int ret;
11273
8110c1a6 11274 if (!entries)
2b188cc1 11275 return -EINVAL;
8110c1a6
JA
11276 if (entries > IORING_MAX_ENTRIES) {
11277 if (!(p->flags & IORING_SETUP_CLAMP))
11278 return -EINVAL;
11279 entries = IORING_MAX_ENTRIES;
11280 }
2b188cc1
JA
11281
11282 /*
11283 * Use twice as many entries for the CQ ring. It's possible for the
11284 * application to drive a higher depth than the size of the SQ ring,
11285 * since the sqes are only used at submission time. This allows for
33a107f0
JA
11286 * some flexibility in overcommitting a bit. If the application has
11287 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
11288 * of CQ ring entries manually.
2b188cc1
JA
11289 */
11290 p->sq_entries = roundup_pow_of_two(entries);
33a107f0
JA
11291 if (p->flags & IORING_SETUP_CQSIZE) {
11292 /*
11293 * If IORING_SETUP_CQSIZE is set, we do the same roundup
11294 * to a power-of-two, if it isn't already. We do NOT impose
11295 * any cq vs sq ring sizing.
11296 */
eb2667b3 11297 if (!p->cq_entries)
33a107f0 11298 return -EINVAL;
8110c1a6
JA
11299 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
11300 if (!(p->flags & IORING_SETUP_CLAMP))
11301 return -EINVAL;
11302 p->cq_entries = IORING_MAX_CQ_ENTRIES;
11303 }
eb2667b3
JQ
11304 p->cq_entries = roundup_pow_of_two(p->cq_entries);
11305 if (p->cq_entries < p->sq_entries)
11306 return -EINVAL;
33a107f0
JA
11307 } else {
11308 p->cq_entries = 2 * p->sq_entries;
11309 }
2b188cc1 11310
2b188cc1 11311 ctx = io_ring_ctx_alloc(p);
62e398be 11312 if (!ctx)
2b188cc1 11313 return -ENOMEM;
773697b6
PB
11314
11315 /*
11316 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
11317 * space applications don't need to poll for io completion events
11318 * themselves; they can rely on io_sq_thread to do the polling
11319 * work, which can reduce cpu usage and uring_lock contention.
11320 */
11321 if (ctx->flags & IORING_SETUP_IOPOLL &&
11322 !(ctx->flags & IORING_SETUP_SQPOLL))
11323 ctx->syscall_iopoll = 1;
11324
2b188cc1 11325 ctx->compat = in_compat_syscall();
62e398be
JA
11326 if (!capable(CAP_IPC_LOCK))
11327 ctx->user = get_uid(current_user());
2aede0e4 11328
9f010507 11329 /*
e1169f06
JA
11330 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
11331 * COOP_TASKRUN is set, then IPIs are never needed by the app.
9f010507 11332 */
e1169f06
JA
11333 ret = -EINVAL;
11334 if (ctx->flags & IORING_SETUP_SQPOLL) {
11335 /* IPI related flags don't make sense with SQPOLL */
11336 if (ctx->flags & IORING_SETUP_COOP_TASKRUN)
11337 goto err;
9f010507 11338 ctx->notify_method = TWA_SIGNAL_NO_IPI;
e1169f06
JA
11339 } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
11340 ctx->notify_method = TWA_SIGNAL_NO_IPI;
11341 } else {
9f010507 11342 ctx->notify_method = TWA_SIGNAL;
e1169f06 11343 }
9f010507 11344
2aede0e4
JA
11345 /*
11346 * This is just grabbed for accounting purposes. When a process exits,
11347 * the mm is exited and dropped before the files, hence we need to hang
11348 * on to this mm purely for the purposes of being able to unaccount
11349 * memory (locked/pinned vm). It's not used for anything else.
11350 */
6b7898eb 11351 mmgrab(current->mm);
2aede0e4 11352 ctx->mm_account = current->mm;
6b7898eb 11353
2b188cc1
JA
11354 ret = io_allocate_scq_urings(ctx, p);
11355 if (ret)
11356 goto err;
11357
7e84e1c7 11358 ret = io_sq_offload_create(ctx, p);
2b188cc1
JA
11359 if (ret)
11360 goto err;
eae071c9 11361 /* always set a rsrc node */
47b228ce
PB
11362 ret = io_rsrc_node_switch_start(ctx);
11363 if (ret)
11364 goto err;
eae071c9 11365 io_rsrc_node_switch(ctx, NULL);
2b188cc1 11366
2b188cc1 11367 memset(&p->sq_off, 0, sizeof(p->sq_off));
75b28aff
HV
11368 p->sq_off.head = offsetof(struct io_rings, sq.head);
11369 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
11370 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
11371 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
11372 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
11373 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
11374 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
2b188cc1
JA
11375
11376 memset(&p->cq_off, 0, sizeof(p->cq_off));
75b28aff
HV
11377 p->cq_off.head = offsetof(struct io_rings, cq.head);
11378 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
11379 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
11380 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
11381 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
11382 p->cq_off.cqes = offsetof(struct io_rings, cqes);
0d9b5b3a 11383 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
ac90f249 11384
7f13657d
XW
11385 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
11386 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
5769a351 11387 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
c73ebb68 11388 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
9690557e 11389 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
c4212f3e
JA
11390 IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
11391 IORING_FEAT_LINKED_FILE;
7f13657d
XW
11392
11393 if (copy_to_user(params, p, sizeof(*p))) {
11394 ret = -EFAULT;
11395 goto err;
11396 }
d1719f70 11397
9faadcc8
PB
11398 file = io_uring_get_file(ctx);
11399 if (IS_ERR(file)) {
11400 ret = PTR_ERR(file);
11401 goto err;
11402 }
11403
044c1ab3
JA
11404 /*
11405 * Install ring fd as the very last thing, so we don't risk someone
11406 * having closed it before we finish setup
11407 */
9faadcc8
PB
11408 ret = io_uring_install_fd(ctx, file);
11409 if (ret < 0) {
11410 /* fput will clean it up */
11411 fput(file);
11412 return ret;
11413 }
044c1ab3 11414
c826bd7a 11415 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
2b188cc1
JA
11416 return ret;
11417err:
11418 io_ring_ctx_wait_and_kill(ctx);
11419 return ret;
11420}
11421
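
io_uring_create() above spells out how p->cq_entries is derived and how the task-work notification method is picked from the setup flags. A hedged userspace sketch of asking for a larger CQ ring and cooperative task running at setup time (raw syscall; the entry counts are arbitrary, and COOP_TASKRUN is deliberately not combined with SQPOLL since the code above rejects that):

/* Illustrative userspace sketch, not part of this file. */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int setup_ring(void)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	/* ask for 4096 CQEs instead of the default 2 * sq_entries */
	p.flags = IORING_SETUP_CQSIZE | IORING_SETUP_COOP_TASKRUN;
	p.cq_entries = 4096;

	/* both ring sizes are rounded up to a power of two by the kernel */
	return syscall(__NR_io_uring_setup, 128, &p);
}
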
11422/*
11423 * Sets up an io_uring context, and returns the fd. The application asks for a
11424 * ring size; we return the actual sq/cq ring sizes (among other things) in the
11425 * params structure passed in.
11426 */
11427static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
11428{
11429 struct io_uring_params p;
2b188cc1
JA
11430 int i;
11431
11432 if (copy_from_user(&p, params, sizeof(p)))
11433 return -EFAULT;
11434 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
11435 if (p.resv[i])
11436 return -EINVAL;
11437 }
11438
6c271ce2 11439 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
8110c1a6 11440 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
7e84e1c7 11441 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
e1169f06
JA
11442 IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
11443 IORING_SETUP_COOP_TASKRUN))
2b188cc1
JA
11444 return -EINVAL;
11445
7f13657d 11446 return io_uring_create(entries, &p, params);
2b188cc1
JA
11447}
11448
11449SYSCALL_DEFINE2(io_uring_setup, u32, entries,
11450 struct io_uring_params __user *, params)
11451{
11452 return io_uring_setup(entries, params);
11453}
11454
c072481d
PB
11455static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
11456 unsigned nr_args)
66f4af93
JA
11457{
11458 struct io_uring_probe *p;
11459 size_t size;
11460 int i, ret;
11461
11462 size = struct_size(p, ops, nr_args);
11463 if (size == SIZE_MAX)
11464 return -EOVERFLOW;
11465 p = kzalloc(size, GFP_KERNEL);
11466 if (!p)
11467 return -ENOMEM;
11468
11469 ret = -EFAULT;
11470 if (copy_from_user(p, arg, size))
11471 goto out;
11472 ret = -EINVAL;
11473 if (memchr_inv(p, 0, size))
11474 goto out;
11475
11476 p->last_op = IORING_OP_LAST - 1;
11477 if (nr_args > IORING_OP_LAST)
11478 nr_args = IORING_OP_LAST;
11479
11480 for (i = 0; i < nr_args; i++) {
11481 p->ops[i].op = i;
11482 if (!io_op_defs[i].not_supported)
11483 p->ops[i].flags = IO_URING_OP_SUPPORTED;
11484 }
11485 p->ops_len = i;
11486
11487 ret = 0;
11488 if (copy_to_user(arg, p, size))
11489 ret = -EFAULT;
11490out:
11491 kfree(p);
11492 return ret;
11493}
11494
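
io_probe() above reports which opcodes the running kernel supports. A hedged userspace sketch of querying it through IORING_REGISTER_PROBE (ring_fd is a placeholder; the zeroed allocation matters because the kernel rejects non-zero input):

/* Illustrative userspace sketch, not part of this file. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static void probe_ops(int ring_fd)
{
	unsigned int nr = 256;	/* upper bound accepted by io_probe() */
	struct io_uring_probe *p;
	int i;

	p = calloc(1, sizeof(*p) + nr * sizeof(p->ops[0]));
	if (!p)
		return;
	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
		    p, nr) == 0) {
		for (i = 0; i < p->ops_len; i++)
			if (p->ops[i].flags & IO_URING_OP_SUPPORTED)
				printf("op %u supported\n", p->ops[i].op);
	}
	free(p);
}
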
071698e1
JA
11495static int io_register_personality(struct io_ring_ctx *ctx)
11496{
4379bf8b 11497 const struct cred *creds;
61cf9370 11498 u32 id;
1e6fa521 11499 int ret;
071698e1 11500
4379bf8b 11501 creds = get_current_cred();
1e6fa521 11502
61cf9370
MWO
11503 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
11504 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
a30f895a
JA
11505 if (ret < 0) {
11506 put_cred(creds);
11507 return ret;
11508 }
11509 return id;
071698e1
JA
11510}
11511
c072481d
PB
11512static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
11513 void __user *arg, unsigned int nr_args)
21b55dbc
SG
11514{
11515 struct io_uring_restriction *res;
11516 size_t size;
11517 int i, ret;
11518
7e84e1c7
SG
11519 /* Restrictions allowed only if rings started disabled */
11520 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
11521 return -EBADFD;
11522
21b55dbc 11523 /* We allow only a single restrictions registration */
7e84e1c7 11524 if (ctx->restrictions.registered)
21b55dbc
SG
11525 return -EBUSY;
11526
11527 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
11528 return -EINVAL;
11529
11530 size = array_size(nr_args, sizeof(*res));
11531 if (size == SIZE_MAX)
11532 return -EOVERFLOW;
11533
11534 res = memdup_user(arg, size);
11535 if (IS_ERR(res))
11536 return PTR_ERR(res);
11537
11538 ret = 0;
11539
11540 for (i = 0; i < nr_args; i++) {
11541 switch (res[i].opcode) {
11542 case IORING_RESTRICTION_REGISTER_OP:
11543 if (res[i].register_op >= IORING_REGISTER_LAST) {
11544 ret = -EINVAL;
11545 goto out;
11546 }
11547
11548 __set_bit(res[i].register_op,
11549 ctx->restrictions.register_op);
11550 break;
11551 case IORING_RESTRICTION_SQE_OP:
11552 if (res[i].sqe_op >= IORING_OP_LAST) {
11553 ret = -EINVAL;
11554 goto out;
11555 }
11556
11557 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
11558 break;
11559 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
11560 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
11561 break;
11562 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
11563 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
11564 break;
11565 default:
11566 ret = -EINVAL;
11567 goto out;
11568 }
11569 }
11570
11571out:
11572 /* Reset all restrictions if an error happened */
11573 if (ret != 0)
11574 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
11575 else
7e84e1c7 11576 ctx->restrictions.registered = true;
21b55dbc
SG
11577
11578 kfree(res);
11579 return ret;
11580}
11581
7e84e1c7
SG
11582static int io_register_enable_rings(struct io_ring_ctx *ctx)
11583{
11584 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
11585 return -EBADFD;
11586
11587 if (ctx->restrictions.registered)
11588 ctx->restricted = 1;
11589
0298ef96
PB
11590 ctx->flags &= ~IORING_SETUP_R_DISABLED;
11591 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
11592 wake_up(&ctx->sq_data->wait);
7e84e1c7
SG
11593 return 0;
11594}
11595
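
io_register_restrictions() and io_register_enable_rings() above implement the R_DISABLED flow: create the ring disabled, register the restriction set once, then enable the ring. A hedged sketch of that sequence (raw syscalls; the particular restrictions chosen are arbitrary):

/* Illustrative userspace sketch, not part of this file. */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int setup_restricted_ring(void)
{
	struct io_uring_params p;
	struct io_uring_restriction res[2];
	int fd;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_R_DISABLED;	/* start disabled */
	fd = syscall(__NR_io_uring_setup, 64, &p);
	if (fd < 0)
		return fd;

	memset(res, 0, sizeof(res));
	/* only allow NOP SQEs ... */
	res[0].opcode = IORING_RESTRICTION_SQE_OP;
	res[0].sqe_op = IORING_OP_NOP;
	/* ... and only the ENABLE_RINGS register opcode once restricted */
	res[1].opcode = IORING_RESTRICTION_REGISTER_OP;
	res[1].register_op = IORING_REGISTER_ENABLE_RINGS;

	if (syscall(__NR_io_uring_register, fd,
		    IORING_REGISTER_RESTRICTIONS, res, 2) ||
	    syscall(__NR_io_uring_register, fd,
		    IORING_REGISTER_ENABLE_RINGS, NULL, 0))
		return -1;
	return fd;
}
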
fdecb662 11596static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
c3bdad02 11597 struct io_uring_rsrc_update2 *up,
98f0b3b4
PB
11598 unsigned nr_args)
11599{
11600 __u32 tmp;
11601 int err;
11602
11603 if (check_add_overflow(up->offset, nr_args, &tmp))
11604 return -EOVERFLOW;
11605 err = io_rsrc_node_switch_start(ctx);
11606 if (err)
11607 return err;
11608
fdecb662
PB
11609 switch (type) {
11610 case IORING_RSRC_FILE:
98f0b3b4 11611 return __io_sqe_files_update(ctx, up, nr_args);
634d00df
PB
11612 case IORING_RSRC_BUFFER:
11613 return __io_sqe_buffers_update(ctx, up, nr_args);
98f0b3b4
PB
11614 }
11615 return -EINVAL;
11616}
11617
c3bdad02
PB
11618static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
11619 unsigned nr_args)
98f0b3b4 11620{
c3bdad02 11621 struct io_uring_rsrc_update2 up;
98f0b3b4
PB
11622
11623 if (!nr_args)
11624 return -EINVAL;
c3bdad02
PB
11625 memset(&up, 0, sizeof(up));
11626 if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
11627 return -EFAULT;
d8a3ba9c 11628 if (up.resv || up.resv2)
565c5e61 11629 return -EINVAL;
c3bdad02
PB
11630 return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
11631}
11632
11633static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
992da01a 11634 unsigned size, unsigned type)
c3bdad02
PB
11635{
11636 struct io_uring_rsrc_update2 up;
11637
11638 if (size != sizeof(up))
11639 return -EINVAL;
98f0b3b4
PB
11640 if (copy_from_user(&up, arg, sizeof(up)))
11641 return -EFAULT;
d8a3ba9c 11642 if (!up.nr || up.resv || up.resv2)
98f0b3b4 11643 return -EINVAL;
992da01a 11644 return __io_register_rsrc_update(ctx, type, &up, up.nr);
98f0b3b4
PB
11645}
11646
c072481d 11647static __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
992da01a 11648 unsigned int size, unsigned int type)
792e3582
PB
11649{
11650 struct io_uring_rsrc_register rr;
11651
11652 /* keep it extendible */
11653 if (size != sizeof(rr))
11654 return -EINVAL;
11655
11656 memset(&rr, 0, sizeof(rr));
11657 if (copy_from_user(&rr, arg, size))
11658 return -EFAULT;
992da01a 11659 if (!rr.nr || rr.resv || rr.resv2)
792e3582
PB
11660 return -EINVAL;
11661
992da01a 11662 switch (type) {
792e3582
PB
11663 case IORING_RSRC_FILE:
11664 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
11665 rr.nr, u64_to_user_ptr(rr.tags));
634d00df
PB
11666 case IORING_RSRC_BUFFER:
11667 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
11668 rr.nr, u64_to_user_ptr(rr.tags));
792e3582
PB
11669 }
11670 return -EINVAL;
11671}
11672
c072481d
PB
11673static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
11674 void __user *arg, unsigned len)
fe76421d
JA
11675{
11676 struct io_uring_task *tctx = current->io_uring;
11677 cpumask_var_t new_mask;
11678 int ret;
11679
11680 if (!tctx || !tctx->io_wq)
11681 return -EINVAL;
11682
11683 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
11684 return -ENOMEM;
11685
11686 cpumask_clear(new_mask);
11687 if (len > cpumask_size())
11688 len = cpumask_size();
11689
0f5e4b83
ES
11690 if (in_compat_syscall()) {
11691 ret = compat_get_bitmap(cpumask_bits(new_mask),
11692 (const compat_ulong_t __user *)arg,
11693 len * 8 /* CHAR_BIT */);
11694 } else {
11695 ret = copy_from_user(new_mask, arg, len);
11696 }
11697
11698 if (ret) {
fe76421d
JA
11699 free_cpumask_var(new_mask);
11700 return -EFAULT;
11701 }
11702
11703 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
11704 free_cpumask_var(new_mask);
11705 return ret;
11706}
11707
c072481d 11708static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
fe76421d
JA
11709{
11710 struct io_uring_task *tctx = current->io_uring;
11711
11712 if (!tctx || !tctx->io_wq)
11713 return -EINVAL;
11714
11715 return io_wq_cpu_affinity(tctx->io_wq, NULL);
11716}
11717
c072481d
PB
11718static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
11719 void __user *arg)
b22fa62a 11720 __must_hold(&ctx->uring_lock)
2e480058 11721{
b22fa62a 11722 struct io_tctx_node *node;
fa84693b
JA
11723 struct io_uring_task *tctx = NULL;
11724 struct io_sq_data *sqd = NULL;
2e480058
JA
11725 __u32 new_count[2];
11726 int i, ret;
11727
2e480058
JA
11728 if (copy_from_user(new_count, arg, sizeof(new_count)))
11729 return -EFAULT;
11730 for (i = 0; i < ARRAY_SIZE(new_count); i++)
11731 if (new_count[i] > INT_MAX)
11732 return -EINVAL;
11733
fa84693b
JA
11734 if (ctx->flags & IORING_SETUP_SQPOLL) {
11735 sqd = ctx->sq_data;
11736 if (sqd) {
009ad9f0
JA
11737 /*
11738 * Observe the correct sqd->lock -> ctx->uring_lock
11739 * ordering. Fine to drop uring_lock here, we hold
11740 * a ref to the ctx.
11741 */
41d3a6bd 11742 refcount_inc(&sqd->refs);
009ad9f0 11743 mutex_unlock(&ctx->uring_lock);
fa84693b 11744 mutex_lock(&sqd->lock);
009ad9f0 11745 mutex_lock(&ctx->uring_lock);
41d3a6bd
JA
11746 if (sqd->thread)
11747 tctx = sqd->thread->io_uring;
fa84693b
JA
11748 }
11749 } else {
11750 tctx = current->io_uring;
11751 }
11752
e139a1ec 11753 BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
fa84693b 11754
bad119b9
PB
11755 for (i = 0; i < ARRAY_SIZE(new_count); i++)
11756 if (new_count[i])
11757 ctx->iowq_limits[i] = new_count[i];
e139a1ec
PB
11758 ctx->iowq_limits_set = true;
11759
e139a1ec
PB
11760 if (tctx && tctx->io_wq) {
11761 ret = io_wq_max_workers(tctx->io_wq, new_count);
11762 if (ret)
11763 goto err;
11764 } else {
11765 memset(new_count, 0, sizeof(new_count));
11766 }
fa84693b 11767
41d3a6bd 11768 if (sqd) {
fa84693b 11769 mutex_unlock(&sqd->lock);
41d3a6bd
JA
11770 io_put_sq_data(sqd);
11771 }
2e480058
JA
11772
11773 if (copy_to_user(arg, new_count, sizeof(new_count)))
11774 return -EFAULT;
11775
b22fa62a
PB
11776 /* that's it for SQPOLL, only the SQPOLL task creates requests */
11777 if (sqd)
11778 return 0;
11779
11780 /* now propagate the restriction to all registered users */
11781 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
11782 struct io_uring_task *tctx = node->task->io_uring;
11783
11784 if (WARN_ON_ONCE(!tctx->io_wq))
11785 continue;
11786
11787 for (i = 0; i < ARRAY_SIZE(new_count); i++)
11788 new_count[i] = ctx->iowq_limits[i];
11789 /* ignore errors, it always returns zero anyway */
11790 (void)io_wq_max_workers(tctx->io_wq, new_count);
11791 }
2e480058 11792 return 0;
fa84693b 11793err:
41d3a6bd 11794 if (sqd) {
fa84693b 11795 mutex_unlock(&sqd->lock);
41d3a6bd
JA
11796 io_put_sq_data(sqd);
11797 }
fa84693b 11798 return ret;
2e480058
JA
11799}
11800
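
io_register_iowq_max_workers() above takes a two-element array, [0] capping bounded io-wq workers and [1] capping unbounded ones; zero entries leave the current limit untouched, and the prior values are written back to userspace. A hedged sketch (ring_fd is a placeholder and the limits are arbitrary):

/* Illustrative userspace sketch, not part of this file. */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int cap_iowq_workers(int ring_fd)
{
	/* [0] = bounded workers, [1] = unbounded workers */
	unsigned int counts[2] = { 8, 4 };

	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2))
		return -1;

	/* the kernel wrote the previous limits back into counts[] */
	printf("old limits: bounded %u, unbounded %u\n",
	       counts[0], counts[1]);
	return 0;
}
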
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex; if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (ctx->restricted) {
		if (opcode >= IORING_REGISTER_LAST)
			return -EINVAL;
		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
		if (!test_bit(opcode, ctx->restrictions.register_op))
			return -EACCES;
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_register_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 0);
		break;
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 1);
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_FILES2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_FILES_UPDATE2:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_BUFFERS2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_BUFFERS_UPDATE:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (!arg || !nr_args)
			break;
		ret = io_register_iowq_aff(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_unregister_iowq_aff(ctx);
		break;
	case IORING_REGISTER_IOWQ_MAX_WORKERS:
		ret = -EINVAL;
		if (!arg || nr_args != 2)
			break;
		ret = io_register_iowq_max_workers(ctx, arg);
		break;
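	/*
	 * Illustrative note (added commentary, not upstream text): the arg
	 * for IORING_REGISTER_IOWQ_MAX_WORKERS is a two-element __u32
	 * array, index 0 for bounded and index 1 for unbounded io-wq
	 * workers, which is why nr_args must be exactly 2.  A hedged
	 * userspace sketch, assuming the raw syscall interface:
	 *
	 *	__u32 counts[2] = { 8, 64 };
	 *	syscall(__NR_io_uring_register, ring_fd,
	 *		IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);
	 *
	 * On return the array is expected to hold the previous limits; a 0
	 * in either slot leaves that worker class unchanged.
	 */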
	case IORING_REGISTER_RING_FDS:
		ret = io_ringfd_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_RING_FDS:
		ret = io_ringfd_unregister(ctx, arg, nr_args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

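/*
 * Userspace sketch (added commentary, not part of the upstream file): one
 * way the ctx->restricted path in __io_uring_register() is exercised.  It
 * assumes a ring created with IORING_SETUP_R_DISABLED and uses the raw
 * io_uring_register(2) syscall with the uapi opcodes; error handling is
 * omitted.
 *
 *	struct io_uring_restriction res[2];
 *
 *	memset(res, 0, sizeof(res));
 *	res[0].opcode = IORING_RESTRICTION_REGISTER_OP;
 *	res[0].register_op = IORING_REGISTER_BUFFERS;
 *	res[1].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[1].sqe_op = IORING_OP_READ;
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 *
 * After IORING_REGISTER_ENABLE_RINGS, any register opcode that was not
 * allow-listed fails with -EACCES via the restriction check above.
 */
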
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	io_run_task_work();

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
out_fput:
	fdput(f);
	return ret;
}

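/*
 * Userspace sketch (added commentary, not part of the upstream file): the
 * most common trip through io_uring_register(2), pinning a fixed buffer
 * with IORING_REGISTER_BUFFERS.  ring_fd is assumed to come from
 * io_uring_setup(2); the raw syscall is used so the example does not
 * depend on liburing, and errors are not checked.
 *
 *	#include <linux/io_uring.h>
 *	#include <sys/syscall.h>
 *	#include <sys/uio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static int register_one_buffer(int ring_fd, size_t len)
 *	{
 *		struct iovec iov = {
 *			.iov_base = malloc(len),
 *			.iov_len  = len,
 *		};
 *
 *		if (!iov.iov_base)
 *			return -1;
 *		return syscall(__NR_io_uring_register, ring_fd,
 *			       IORING_REGISTER_BUFFERS, &iov, 1);
 *	}
 *
 * With liburing the equivalent call is io_uring_register_buffers(), and
 * the pinned buffer is then addressed by sqe->buf_index with
 * IORING_OP_READ_FIXED / IORING_OP_WRITE_FIXED.
 */
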
static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0, __u8, opcode);
	BUILD_BUG_SQE_ELEM(1, __u8, flags);
	BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
	BUILD_BUG_SQE_ELEM(4, __s32, fd);
	BUILD_BUG_SQE_ELEM(8, __u64, off);
	BUILD_BUG_SQE_ELEM(8, __u64, addr2);
	BUILD_BUG_SQE_ELEM(16, __u64, addr);
	BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32, len);
	BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
	BUILD_BUG_SQE_ELEM(32, __u64, user_data);
	BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
	BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
	BUILD_BUG_SQE_ELEM(42, __u16, personality);
	BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
	BUILD_BUG_SQE_ELEM(44, __u32, file_index);

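	/*
	 * Userspace mirror of the layout checks above (added commentary):
	 * an application that hard-codes SQE offsets can assert the same
	 * invariants at compile time against the uapi header, e.g.:
	 *
	 *	_Static_assert(sizeof(struct io_uring_sqe) == 64,
	 *		       "SQE must stay 64 bytes");
	 *	_Static_assert(offsetof(struct io_uring_sqe, user_data) == 32,
	 *		       "user_data at byte 32");
	 *	_Static_assert(offsetof(struct io_uring_sqe, personality) == 42,
	 *		       "personality at byte 42");
	 *
	 * using <linux/io_uring.h> and <stddef.h>; the field names are
	 * those of the uapi struct io_uring_sqe.
	 */
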
	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
		     sizeof(struct io_uring_rsrc_update));
	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
		     sizeof(struct io_uring_rsrc_update2));

	/* ->buf_index is u16 */
	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	/* should fit into one byte */
	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
	BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
	BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));

	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));

	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT);
	return 0;
};
__initcall(io_uring_init);