io_uring: add helper to return req to cache list
fs/io_uring.c (linux-2.6-block.git)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
7 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqe (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
2b188cc1
JA
29 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes, but also to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
40 * Copyright (c) 2018-2019 Christoph Hellwig
41 */
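/*
 * Illustrative userspace sketch of the CQ-side rules above (cq_head,
 * cq_tail, cq_ring_mask and cqes stand for the fields mmap'ed via
 * IORING_OFF_CQ_RING; real applications should just use liburing):
 *
 *	unsigned head = *cq_head;
 *	unsigned tail = smp_load_acquire(cq_tail);  <- pairs with the kernel tail store
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_ring_mask];
 *		consume(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);  <- orders the entry loads before the head store
 */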
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/errno.h>
45#include <linux/syscalls.h>
46#include <linux/compat.h>
47#include <net/compat.h>
48#include <linux/refcount.h>
49#include <linux/uio.h>
50#include <linux/bits.h>
51
52#include <linux/sched/signal.h>
53#include <linux/fs.h>
54#include <linux/file.h>
55#include <linux/fdtable.h>
56#include <linux/mm.h>
57#include <linux/mman.h>
58#include <linux/percpu.h>
59#include <linux/slab.h>
60#include <linux/blk-mq.h>
61#include <linux/bvec.h>
62#include <linux/net.h>
63#include <net/sock.h>
64#include <net/af_unix.h>
65#include <net/scm.h>
66#include <linux/anon_inodes.h>
67#include <linux/sched/mm.h>
68#include <linux/uaccess.h>
69#include <linux/nospec.h>
70#include <linux/sizes.h>
71#include <linux/hugetlb.h>
72#include <linux/highmem.h>
73#include <linux/namei.h>
74#include <linux/fsnotify.h>
75#include <linux/fadvise.h>
76#include <linux/eventpoll.h>
77#include <linux/splice.h>
78#include <linux/task_work.h>
79#include <linux/pagemap.h>
80#include <linux/io_uring.h>
81#include <linux/audit.h>
82#include <linux/security.h>
83
84#define CREATE_TRACE_POINTS
85#include <trace/events/io_uring.h>
86
87#include <uapi/linux/io_uring.h>
88
89#include "internal.h"
90#include "io-wq.h"
91
92#define IORING_MAX_ENTRIES 32768
93#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
94#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
95
96/* only define max */
97#define IORING_MAX_FIXED_FILES (1U << 15)
98#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
99 IORING_REGISTER_LAST + IORING_OP_LAST)
100
101#define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3)
102#define IO_RSRC_TAG_TABLE_MAX (1U << IO_RSRC_TAG_TABLE_SHIFT)
103#define IO_RSRC_TAG_TABLE_MASK (IO_RSRC_TAG_TABLE_MAX - 1)
104
105#define IORING_MAX_REG_BUFFERS (1U << 14)
106
107#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
108 IOSQE_IO_HARDLINK | IOSQE_ASYNC)
109
110#define SQE_VALID_FLAGS (SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
111 IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
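/*
 * Submission-time validation uses this mask to reject SQEs carrying
 * unknown flag bits, along the lines of (see the request init path
 * further down in this file):
 *
 *	if (sqe->flags & ~SQE_VALID_FLAGS)
 *		return -EINVAL;
 */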
112
113#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
114 REQ_F_POLLED | REQ_F_CREDS | REQ_F_ASYNC_DATA)
115
116#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
117 IO_REQ_CLEAN_FLAGS)
118
119#define IO_TCTX_REFS_CACHE_NR (1U << 10)
120
121struct io_uring {
122 u32 head ____cacheline_aligned_in_smp;
123 u32 tail ____cacheline_aligned_in_smp;
124};
125
126/*
127 * This data is shared with the application through the mmap at offsets
128 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
129 *
130 * The offsets to the member fields are published through struct
131 * io_sqring_offsets when calling io_uring_setup.
132 */
133struct io_rings {
134 /*
135 * Head and tail offsets into the ring; the offsets need to be
136 * masked to get valid indices.
137 *
138 * The kernel controls head of the sq ring and the tail of the cq ring,
139 * and the application controls tail of the sq ring and the head of the
140 * cq ring.
141 */
142 struct io_uring sq, cq;
143 /*
144 * Bitmasks to apply to head and tail offsets (constant, equals
145 * ring_entries - 1)
146 */
147 u32 sq_ring_mask, cq_ring_mask;
148 /* Ring sizes (constant, power of 2) */
149 u32 sq_ring_entries, cq_ring_entries;
150 /*
151 * Number of invalid entries dropped by the kernel due to
152 * invalid index stored in array
153 *
154 * Written by the kernel, shouldn't be modified by the
155 * application (i.e. get number of "new events" by comparing to
156 * cached value).
157 *
158 * After a new SQ head value was read by the application this
159 * counter includes all submissions that were dropped reaching
160 * the new SQ head (and possibly more).
161 */
162 u32 sq_dropped;
163 /*
164 * Runtime SQ flags
165 *
166 * Written by the kernel, shouldn't be modified by the
167 * application.
168 *
169 * The application needs a full memory barrier before checking
170 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
171 */
172 u32 sq_flags;
173 /*
174 * Runtime CQ flags
175 *
176 * Written by the application, shouldn't be modified by the
177 * kernel.
178 */
179 u32 cq_flags;
180 /*
181 * Number of completion events lost because the queue was full;
182 * this should be avoided by the application by making sure
183 * there are not more requests pending than there is space in
184 * the completion queue.
185 *
186 * Written by the kernel, shouldn't be modified by the
187 * application (i.e. get number of "new events" by comparing to
188 * cached value).
189 *
190 * As completion events come in out of order this counter is not
191 * ordered with any other data.
192 */
193 u32 cq_overflow;
194 /*
195 * Ring buffer of completion events.
196 *
197 * The kernel writes completion events fresh every time they are
198 * produced, so the application is allowed to modify pending
199 * entries.
200 */
201 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
202};
203
204enum io_uring_cmd_flags {
205 IO_URING_F_COMPLETE_DEFER = 1,
206 IO_URING_F_UNLOCKED = 2,
207 /* int's last bit, sign checks are usually faster than a bit test */
208 IO_URING_F_NONBLOCK = INT_MIN,
209};
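/*
 * Since IO_URING_F_NONBLOCK occupies the sign bit, a test such as
 *	if (issue_flags & IO_URING_F_NONBLOCK)
 * can compile down to a plain "is it negative" check, which is what the
 * comment above is getting at.
 */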
210
211struct io_mapped_ubuf {
212 u64 ubuf;
213 u64 ubuf_end;
214 unsigned int nr_bvecs;
215 unsigned long acct_pages;
216 struct bio_vec bvec[];
217};
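/*
 * Imports from a registered buffer are bounds-checked against
 * [ubuf, ubuf_end) before the bvec[] array is walked; roughly:
 *
 *	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf_end)
 *		return -EFAULT;
 */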
218
219struct io_ring_ctx;
220
221struct io_overflow_cqe {
222 struct io_uring_cqe cqe;
223 struct list_head list;
224};
225
226struct io_fixed_file {
227 /* file * with additional FFS_* flags */
228 unsigned long file_ptr;
229};
230
231struct io_rsrc_put {
232 struct list_head list;
233 u64 tag;
234 union {
235 void *rsrc;
236 struct file *file;
237 struct io_mapped_ubuf *buf;
238 };
239};
240
241struct io_file_table {
242 struct io_fixed_file *files;
243};
244
245struct io_rsrc_node {
246 struct percpu_ref refs;
247 struct list_head node;
248 struct list_head rsrc_list;
249 struct io_rsrc_data *rsrc_data;
250 struct llist_node llist;
251 bool done;
252};
253
254typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
255
256struct io_rsrc_data {
257 struct io_ring_ctx *ctx;
258
259 u64 **tags;
260 unsigned int nr;
261 rsrc_put_fn *do_put;
262 atomic_t refs;
263 struct completion done;
264 bool quiesce;
265};
266
267struct io_buffer_list {
268 struct list_head list;
269 struct list_head buf_list;
270 __u16 bgid;
271};
272
273struct io_buffer {
274 struct list_head list;
275 __u64 addr;
276 __u32 len;
277 __u16 bid;
278 __u16 bgid;
279};
280
281struct io_restriction {
282 DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
283 DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
284 u8 sqe_flags_allowed;
285 u8 sqe_flags_required;
286 bool registered;
287};
288
289enum {
290 IO_SQ_THREAD_SHOULD_STOP = 0,
291 IO_SQ_THREAD_SHOULD_PARK,
292};
293
294struct io_sq_data {
295 refcount_t refs;
296 atomic_t park_pending;
297 struct mutex lock;
298
299 /* ctx's that are using this sqd */
300 struct list_head ctx_list;
301
302 struct task_struct *thread;
303 struct wait_queue_head wait;
304
305 unsigned sq_thread_idle;
306 int sq_cpu;
307 pid_t task_pid;
308 pid_t task_tgid;
309
310 unsigned long state;
311 struct completion exited;
312};
313
314#define IO_COMPL_BATCH 32
315#define IO_REQ_CACHE_SIZE 32
316#define IO_REQ_ALLOC_BATCH 8
317
318struct io_submit_link {
319 struct io_kiocb *head;
320 struct io_kiocb *last;
321};
322
323struct io_submit_state {
324 /* inline/task_work completion list, under ->uring_lock */
325 struct io_wq_work_node free_list;
326 /* batch completion logic */
327 struct io_wq_work_list compl_reqs;
328 struct io_submit_link link;
329
330 bool plug_started;
331 bool need_plug;
332 bool flush_cqes;
333 unsigned short submit_nr;
334 struct blk_plug plug;
335};
336
337struct io_ev_fd {
338 struct eventfd_ctx *cq_ev_fd;
339 unsigned int eventfd_async: 1;
340 struct rcu_head rcu;
341};
342
343#define IO_BUFFERS_HASH_BITS 5
344
345struct io_ring_ctx {
346 /* const or read-mostly hot data */
347 struct {
348 struct percpu_ref refs;
349
350 struct io_rings *rings;
351 unsigned int flags;
352 unsigned int compat: 1;
353 unsigned int drain_next: 1;
354 unsigned int restricted: 1;
355 unsigned int off_timeout_used: 1;
356 unsigned int drain_active: 1;
357 unsigned int drain_disabled: 1;
358 unsigned int has_evfd: 1;
359 unsigned int syscall_iopoll: 1;
360 } ____cacheline_aligned_in_smp;
361
362 /* submission data */
363 struct {
364 struct mutex uring_lock;
365
366 /*
367 * Ring buffer of indices into array of io_uring_sqe, which is
368 * mmapped by the application using the IORING_OFF_SQES offset.
369 *
370 * This indirection could e.g. be used to assign fixed
371 * io_uring_sqe entries to operations and only submit them to
372 * the queue when needed.
373 *
374 * The kernel modifies neither the indices array nor the entries
375 * array.
376 */
377 u32 *sq_array;
378 struct io_uring_sqe *sq_sqes;
379 unsigned cached_sq_head;
380 unsigned sq_entries;
381 struct list_head defer_list;
382
383 /*
384 * Fixed resources fast path, should be accessed only under
385 * uring_lock, and updated through io_uring_register(2)
386 */
387 struct io_rsrc_node *rsrc_node;
388 int rsrc_cached_refs;
389 struct io_file_table file_table;
390 unsigned nr_user_files;
391 unsigned nr_user_bufs;
392 struct io_mapped_ubuf **user_bufs;
393
394 struct io_submit_state submit_state;
395 struct list_head timeout_list;
396 struct list_head ltimeout_list;
397 struct list_head cq_overflow_list;
398 struct list_head *io_buffers;
399 struct list_head io_buffers_cache;
400 struct list_head apoll_cache;
401 struct xarray personalities;
402 u32 pers_next;
403 unsigned sq_thread_idle;
404 } ____cacheline_aligned_in_smp;
405
406 /* IRQ completion list, under ->completion_lock */
407 struct io_wq_work_list locked_free_list;
408 unsigned int locked_free_nr;
409
410 const struct cred *sq_creds; /* cred used for __io_sq_thread() */
411 struct io_sq_data *sq_data; /* if using sq thread polling */
412
413 struct wait_queue_head sqo_sq_wait;
414 struct list_head sqd_list;
415
416 unsigned long check_cq_overflow;
417
418 struct {
419 unsigned cached_cq_tail;
420 unsigned cq_entries;
421 struct io_ev_fd __rcu *io_ev_fd;
422 struct wait_queue_head cq_wait;
423 unsigned cq_extra;
424 atomic_t cq_timeouts;
425 unsigned cq_last_tm_flush;
426 } ____cacheline_aligned_in_smp;
427
428 struct {
429 spinlock_t completion_lock;
430
431 spinlock_t timeout_lock;
432
433 /*
434 * ->iopoll_list is protected by the ctx->uring_lock for
435 * io_uring instances that don't use IORING_SETUP_SQPOLL.
436 * For SQPOLL, only the single threaded io_sq_thread() will
437 * manipulate the list, hence no extra locking is needed there.
438 */
439 struct io_wq_work_list iopoll_list;
440 struct hlist_head *cancel_hash;
441 unsigned cancel_hash_bits;
442 bool poll_multi_queue;
443
444 struct list_head io_buffers_comp;
445 } ____cacheline_aligned_in_smp;
446
447 struct io_restriction restrictions;
448
449 /* slow path rsrc auxiliary data, used by update/register */
450 struct {
451 struct io_rsrc_node *rsrc_backup_node;
452 struct io_mapped_ubuf *dummy_ubuf;
453 struct io_rsrc_data *file_data;
454 struct io_rsrc_data *buf_data;
455
456 struct delayed_work rsrc_put_work;
457 struct llist_head rsrc_put_llist;
458 struct list_head rsrc_ref_list;
459 spinlock_t rsrc_ref_lock;
460
461 struct list_head io_buffers_pages;
462 };
463
464 /* Keep this last, we don't need it for the fast path */
465 struct {
466 #if defined(CONFIG_UNIX)
467 struct socket *ring_sock;
468 #endif
469 /* hashed buffered write serialization */
470 struct io_wq_hash *hash_map;
471
472 /* Only used for accounting purposes */
473 struct user_struct *user;
474 struct mm_struct *mm_account;
475
476 /* ctx exit and cancelation */
477 struct llist_head fallback_llist;
478 struct delayed_work fallback_work;
479 struct work_struct exit_work;
480 struct list_head tctx_list;
481 struct completion ref_comp;
482 u32 iowq_limits[2];
483 bool iowq_limits_set;
484 };
485};
486
487/*
488 * Arbitrary limit, can be raised if need be
489 */
490#define IO_RINGFD_REG_MAX 16
491
492struct io_uring_task {
493 /* submission side */
494 int cached_refs;
495 struct xarray xa;
496 struct wait_queue_head wait;
497 const struct io_ring_ctx *last;
498 struct io_wq *io_wq;
499 struct percpu_counter inflight;
500 atomic_t in_idle;
501
502 spinlock_t task_lock;
503 struct io_wq_work_list task_list;
504 struct io_wq_work_list prior_task_list;
505 struct callback_head task_work;
506 struct file **registered_rings;
507 bool task_running;
508};
509
510/*
511 * First field must be the file pointer in all the
512 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
513 */
514struct io_poll_iocb {
515 struct file *file;
516 struct wait_queue_head *head;
517 __poll_t events;
518 struct wait_queue_entry wait;
519};
520
521struct io_poll_update {
522 struct file *file;
523 u64 old_user_data;
524 u64 new_user_data;
525 __poll_t events;
526 bool update_events;
527 bool update_user_data;
528};
529
530struct io_close {
531 struct file *file;
532 int fd;
533 u32 file_slot;
534};
535
536struct io_timeout_data {
537 struct io_kiocb *req;
538 struct hrtimer timer;
539 struct timespec64 ts;
540 enum hrtimer_mode mode;
541 u32 flags;
542};
543
544struct io_accept {
545 struct file *file;
546 struct sockaddr __user *addr;
547 int __user *addr_len;
548 int flags;
549 u32 file_slot;
550 unsigned long nofile;
551};
552
553struct io_sync {
554 struct file *file;
555 loff_t len;
556 loff_t off;
557 int flags;
558 int mode;
559};
560
561struct io_cancel {
562 struct file *file;
563 u64 addr;
564};
565
566struct io_timeout {
567 struct file *file;
568 u32 off;
569 u32 target_seq;
570 struct list_head list;
571 /* head of the link, used by linked timeouts only */
572 struct io_kiocb *head;
573 /* for linked completions */
574 struct io_kiocb *prev;
575};
576
577struct io_timeout_rem {
578 struct file *file;
579 u64 addr;
580
581 /* timeout update */
582 struct timespec64 ts;
583 u32 flags;
584 bool ltimeout;
585};
586
587struct io_rw {
588 /* NOTE: kiocb has the file as the first member, so don't do it here */
589 struct kiocb kiocb;
590 u64 addr;
591 u32 len;
592 u32 flags;
593};
594
595struct io_connect {
596 struct file *file;
597 struct sockaddr __user *addr;
598 int addr_len;
599};
600
601struct io_sr_msg {
602 struct file *file;
603 union {
604 struct compat_msghdr __user *umsg_compat;
605 struct user_msghdr __user *umsg;
606 void __user *buf;
607 };
608 int msg_flags;
609 int bgid;
610 size_t len;
611 size_t done_io;
612};
613
614struct io_open {
615 struct file *file;
616 int dfd;
617 u32 file_slot;
618 struct filename *filename;
619 struct open_how how;
620 unsigned long nofile;
621};
622
623struct io_rsrc_update {
624 struct file *file;
625 u64 arg;
626 u32 nr_args;
627 u32 offset;
628};
629
630struct io_fadvise {
631 struct file *file;
632 u64 offset;
633 u32 len;
634 u32 advice;
635};
636
637struct io_madvise {
638 struct file *file;
639 u64 addr;
640 u32 len;
641 u32 advice;
642};
643
644struct io_epoll {
645 struct file *file;
646 int epfd;
647 int op;
648 int fd;
649 struct epoll_event event;
650};
651
652struct io_splice {
653 struct file *file_out;
654 loff_t off_out;
655 loff_t off_in;
656 u64 len;
657 int splice_fd_in;
658 unsigned int flags;
659};
660
661struct io_provide_buf {
662 struct file *file;
663 __u64 addr;
664 __u32 len;
665 __u32 bgid;
666 __u16 nbufs;
667 __u16 bid;
668};
669
670struct io_statx {
671 struct file *file;
672 int dfd;
673 unsigned int mask;
674 unsigned int flags;
675 struct filename *filename;
676 struct statx __user *buffer;
677};
678
679struct io_shutdown {
680 struct file *file;
681 int how;
682};
683
684struct io_rename {
685 struct file *file;
686 int old_dfd;
687 int new_dfd;
688 struct filename *oldpath;
689 struct filename *newpath;
690 int flags;
691};
692
693struct io_unlink {
694 struct file *file;
695 int dfd;
696 int flags;
697 struct filename *filename;
698};
699
700struct io_mkdir {
701 struct file *file;
702 int dfd;
703 umode_t mode;
704 struct filename *filename;
705};
706
707struct io_symlink {
708 struct file *file;
709 int new_dfd;
710 struct filename *oldpath;
711 struct filename *newpath;
712};
713
714struct io_hardlink {
715 struct file *file;
716 int old_dfd;
717 int new_dfd;
718 struct filename *oldpath;
719 struct filename *newpath;
720 int flags;
721};
722
723struct io_msg {
724 struct file *file;
725 u64 user_data;
726 u32 len;
727};
728
729struct io_async_connect {
730 struct sockaddr_storage address;
731};
732
733struct io_async_msghdr {
734 struct iovec fast_iov[UIO_FASTIOV];
735 /* points to an allocated iov, if NULL we use fast_iov instead */
736 struct iovec *free_iov;
737 struct sockaddr __user *uaddr;
738 struct msghdr msg;
739 struct sockaddr_storage addr;
740};
741
742struct io_rw_state {
743 struct iov_iter iter;
744 struct iov_iter_state iter_state;
745 struct iovec fast_iov[UIO_FASTIOV];
746};
747
748struct io_async_rw {
749 struct io_rw_state s;
750 const struct iovec *free_iovec;
751 size_t bytes_done;
752 struct wait_page_queue wpq;
753};
754
755enum {
756 REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
757 REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
758 REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
759 REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
760 REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
761 REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
762 REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,
763
764 /* first byte is taken by user flags, shift it to not overlap */
765 REQ_F_FAIL_BIT = 8,
766 REQ_F_INFLIGHT_BIT,
767 REQ_F_CUR_POS_BIT,
768 REQ_F_NOWAIT_BIT,
769 REQ_F_LINK_TIMEOUT_BIT,
770 REQ_F_NEED_CLEANUP_BIT,
771 REQ_F_POLLED_BIT,
772 REQ_F_BUFFER_SELECTED_BIT,
773 REQ_F_COMPLETE_INLINE_BIT,
774 REQ_F_REISSUE_BIT,
775 REQ_F_CREDS_BIT,
776 REQ_F_REFCOUNT_BIT,
777 REQ_F_ARM_LTIMEOUT_BIT,
778 REQ_F_ASYNC_DATA_BIT,
779 REQ_F_SKIP_LINK_CQES_BIT,
780 REQ_F_SINGLE_POLL_BIT,
781 REQ_F_DOUBLE_POLL_BIT,
782 REQ_F_PARTIAL_IO_BIT,
783 /* keep async read/write and isreg together and in order */
784 REQ_F_SUPPORT_NOWAIT_BIT,
785 REQ_F_ISREG_BIT,
786
787 /* not a real bit, just to check we're not overflowing the space */
788 __REQ_F_LAST_BIT,
789};
790
791enum {
792 /* ctx owns file */
793 REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
794 /* drain existing IO first */
795 REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
796 /* linked sqes */
797 REQ_F_LINK = BIT(REQ_F_LINK_BIT),
798 /* doesn't sever on completion < 0 */
799 REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
800 /* IOSQE_ASYNC */
801 REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
802 /* IOSQE_BUFFER_SELECT */
803 REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
804 /* IOSQE_CQE_SKIP_SUCCESS */
805 REQ_F_CQE_SKIP = BIT(REQ_F_CQE_SKIP_BIT),
806
807 /* fail rest of links */
808 REQ_F_FAIL = BIT(REQ_F_FAIL_BIT),
809 /* on inflight list, should be cancelled and waited on exit reliably */
810 REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
811 /* read/write uses file position */
812 REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
813 /* must not punt to workers */
814 REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
815 /* has or had linked timeout */
816 REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
817 /* needs cleanup */
818 REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
819 /* already went through poll handler */
820 REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
821 /* buffer already selected */
822 REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
823 /* completion is deferred through io_comp_state */
824 REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
825 /* caller should reissue async */
826 REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
827 /* supports async reads/writes */
828 REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT),
829 /* regular file */
830 REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
831 /* has creds assigned */
832 REQ_F_CREDS = BIT(REQ_F_CREDS_BIT),
833 /* skip refcounting if not set */
834 REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT),
835 /* there is a linked timeout that has to be armed */
836 REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT),
837 /* ->async_data allocated */
838 REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT),
839 /* don't post CQEs while failing linked requests */
840 REQ_F_SKIP_LINK_CQES = BIT(REQ_F_SKIP_LINK_CQES_BIT),
841 /* single poll may be active */
842 REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT),
843 /* double poll may be active */
844 REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT),
845 /* request has already done partial IO */
846 REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT),
847};
848
849struct async_poll {
850 struct io_poll_iocb poll;
851 struct io_poll_iocb *double_poll;
852};
853
854typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
855
856struct io_task_work {
857 union {
858 struct io_wq_work_node node;
859 struct llist_node fallback_node;
860 };
861 io_req_tw_func_t func;
862};
863
864enum {
865 IORING_RSRC_FILE = 0,
866 IORING_RSRC_BUFFER = 1,
867};
868
869struct io_cqe {
870 __u64 user_data;
871 __s32 res;
872 /* fd initially, then cflags for completion */
873 union {
874 __u32 flags;
875 int fd;
876 };
877};
878
879/*
880 * NOTE! Each of the iocb union members has the file pointer
881 * as the first entry in their struct definition. So you can
882 * access the file pointer through any of the sub-structs,
883 * or directly as just 'file' in this struct.
884 */
885struct io_kiocb {
886 union {
887 struct file *file;
888 struct io_rw rw;
889 struct io_poll_iocb poll;
890 struct io_poll_update poll_update;
891 struct io_accept accept;
892 struct io_sync sync;
893 struct io_cancel cancel;
894 struct io_timeout timeout;
895 struct io_timeout_rem timeout_rem;
896 struct io_connect connect;
897 struct io_sr_msg sr_msg;
898 struct io_open open;
899 struct io_close close;
900 struct io_rsrc_update rsrc_update;
901 struct io_fadvise fadvise;
902 struct io_madvise madvise;
903 struct io_epoll epoll;
904 struct io_splice splice;
905 struct io_provide_buf pbuf;
906 struct io_statx statx;
907 struct io_shutdown shutdown;
908 struct io_rename rename;
909 struct io_unlink unlink;
910 struct io_mkdir mkdir;
911 struct io_symlink symlink;
912 struct io_hardlink hardlink;
913 struct io_msg msg;
914 };
915
916 u8 opcode;
917 /* polled IO has completed */
918 u8 iopoll_completed;
919 u16 buf_index;
920 unsigned int flags;
921
922 struct io_cqe cqe;
923
924 struct io_ring_ctx *ctx;
925 struct task_struct *task;
926
927 struct percpu_ref *fixed_rsrc_refs;
928 /* store used ubuf, so we can prevent reloading */
929 struct io_mapped_ubuf *imu;
930
931 union {
932 /* used by request caches, completion batching and iopoll */
933 struct io_wq_work_node comp_list;
934 /* cache ->apoll->events */
935 int apoll_events;
936 };
937 atomic_t refs;
938 atomic_t poll_refs;
939 struct io_task_work io_task_work;
940 /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
941 struct hlist_node hash_node;
942 /* internal polling, see IORING_FEAT_FAST_POLL */
943 struct async_poll *apoll;
944 /* opcode allocated if it needs to store data for async defer */
945 void *async_data;
946 /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
947 struct io_buffer *kbuf;
948 /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
949 struct io_kiocb *link;
950 /* custom credentials, valid IFF REQ_F_CREDS is set */
951 const struct cred *creds;
952 struct io_wq_work work;
953};
954
955struct io_tctx_node {
956 struct list_head ctx_node;
957 struct task_struct *task;
958 struct io_ring_ctx *ctx;
959};
960
961struct io_defer_entry {
962 struct list_head list;
963 struct io_kiocb *req;
964 u32 seq;
965};
966
967struct io_op_def {
968 /* needs req->file assigned */
969 unsigned needs_file : 1;
970 /* should block plug */
971 unsigned plug : 1;
972 /* hash wq insertion if file is a regular file */
973 unsigned hash_reg_file : 1;
974 /* unbound wq insertion if file is a non-regular file */
975 unsigned unbound_nonreg_file : 1;
976 /* set if opcode supports polled "wait" */
977 unsigned pollin : 1;
978 unsigned pollout : 1;
979 unsigned poll_exclusive : 1;
980 /* op supports buffer selection */
981 unsigned buffer_select : 1;
982 /* do prep async if is going to be punted */
983 unsigned needs_async_setup : 1;
984 /* opcode is not supported by this kernel */
985 unsigned not_supported : 1;
986 /* skip auditing */
987 unsigned audit_skip : 1;
988 /* size of async data needed, if any */
989 unsigned short async_size;
990};
991
992static const struct io_op_def io_op_defs[] = {
993 [IORING_OP_NOP] = {},
994 [IORING_OP_READV] = {
995 .needs_file = 1,
996 .unbound_nonreg_file = 1,
997 .pollin = 1,
998 .buffer_select = 1,
999 .needs_async_setup = 1,
1000 .plug = 1,
1001 .audit_skip = 1,
1002 .async_size = sizeof(struct io_async_rw),
1003 },
1004 [IORING_OP_WRITEV] = {
1005 .needs_file = 1,
1006 .hash_reg_file = 1,
1007 .unbound_nonreg_file = 1,
1008 .pollout = 1,
1009 .needs_async_setup = 1,
1010 .plug = 1,
1011 .audit_skip = 1,
1012 .async_size = sizeof(struct io_async_rw),
1013 },
1014 [IORING_OP_FSYNC] = {
1015 .needs_file = 1,
1016 .audit_skip = 1,
1017 },
1018 [IORING_OP_READ_FIXED] = {
1019 .needs_file = 1,
1020 .unbound_nonreg_file = 1,
1021 .pollin = 1,
1022 .plug = 1,
1023 .audit_skip = 1,
1024 .async_size = sizeof(struct io_async_rw),
1025 },
1026 [IORING_OP_WRITE_FIXED] = {
1027 .needs_file = 1,
1028 .hash_reg_file = 1,
1029 .unbound_nonreg_file = 1,
1030 .pollout = 1,
1031 .plug = 1,
1032 .audit_skip = 1,
1033 .async_size = sizeof(struct io_async_rw),
1034 },
1035 [IORING_OP_POLL_ADD] = {
1036 .needs_file = 1,
1037 .unbound_nonreg_file = 1,
1038 .audit_skip = 1,
1039 },
1040 [IORING_OP_POLL_REMOVE] = {
1041 .audit_skip = 1,
1042 },
1043 [IORING_OP_SYNC_FILE_RANGE] = {
1044 .needs_file = 1,
1045 .audit_skip = 1,
1046 },
1047 [IORING_OP_SENDMSG] = {
1048 .needs_file = 1,
1049 .unbound_nonreg_file = 1,
1050 .pollout = 1,
1051 .needs_async_setup = 1,
1052 .async_size = sizeof(struct io_async_msghdr),
1053 },
1054 [IORING_OP_RECVMSG] = {
1055 .needs_file = 1,
1056 .unbound_nonreg_file = 1,
1057 .pollin = 1,
1058 .buffer_select = 1,
1059 .needs_async_setup = 1,
1060 .async_size = sizeof(struct io_async_msghdr),
1061 },
1062 [IORING_OP_TIMEOUT] = {
1063 .audit_skip = 1,
1064 .async_size = sizeof(struct io_timeout_data),
1065 },
1066 [IORING_OP_TIMEOUT_REMOVE] = {
1067 /* used by timeout updates' prep() */
1068 .audit_skip = 1,
1069 },
1070 [IORING_OP_ACCEPT] = {
1071 .needs_file = 1,
1072 .unbound_nonreg_file = 1,
1073 .pollin = 1,
1074 .poll_exclusive = 1,
1075 },
1076 [IORING_OP_ASYNC_CANCEL] = {
1077 .audit_skip = 1,
1078 },
1079 [IORING_OP_LINK_TIMEOUT] = {
1080 .audit_skip = 1,
1081 .async_size = sizeof(struct io_timeout_data),
1082 },
1083 [IORING_OP_CONNECT] = {
1084 .needs_file = 1,
1085 .unbound_nonreg_file = 1,
1086 .pollout = 1,
1087 .needs_async_setup = 1,
1088 .async_size = sizeof(struct io_async_connect),
1089 },
1090 [IORING_OP_FALLOCATE] = {
1091 .needs_file = 1,
1092 },
1093 [IORING_OP_OPENAT] = {},
1094 [IORING_OP_CLOSE] = {},
1095 [IORING_OP_FILES_UPDATE] = {
1096 .audit_skip = 1,
1097 },
1098 [IORING_OP_STATX] = {
1099 .audit_skip = 1,
1100 },
1101 [IORING_OP_READ] = {
1102 .needs_file = 1,
1103 .unbound_nonreg_file = 1,
1104 .pollin = 1,
1105 .buffer_select = 1,
1106 .plug = 1,
1107 .audit_skip = 1,
1108 .async_size = sizeof(struct io_async_rw),
1109 },
1110 [IORING_OP_WRITE] = {
1111 .needs_file = 1,
1112 .hash_reg_file = 1,
1113 .unbound_nonreg_file = 1,
1114 .pollout = 1,
1115 .plug = 1,
1116 .audit_skip = 1,
1117 .async_size = sizeof(struct io_async_rw),
1118 },
1119 [IORING_OP_FADVISE] = {
1120 .needs_file = 1,
1121 .audit_skip = 1,
1122 },
1123 [IORING_OP_MADVISE] = {},
1124 [IORING_OP_SEND] = {
1125 .needs_file = 1,
1126 .unbound_nonreg_file = 1,
1127 .pollout = 1,
1128 .audit_skip = 1,
1129 },
1130 [IORING_OP_RECV] = {
1131 .needs_file = 1,
1132 .unbound_nonreg_file = 1,
1133 .pollin = 1,
1134 .buffer_select = 1,
1135 .audit_skip = 1,
1136 },
1137 [IORING_OP_OPENAT2] = {
1138 },
1139 [IORING_OP_EPOLL_CTL] = {
1140 .unbound_nonreg_file = 1,
1141 .audit_skip = 1,
1142 },
1143 [IORING_OP_SPLICE] = {
1144 .needs_file = 1,
1145 .hash_reg_file = 1,
1146 .unbound_nonreg_file = 1,
1147 .audit_skip = 1,
1148 },
1149 [IORING_OP_PROVIDE_BUFFERS] = {
1150 .audit_skip = 1,
1151 },
1152 [IORING_OP_REMOVE_BUFFERS] = {
1153 .audit_skip = 1,
1154 },
1155 [IORING_OP_TEE] = {
1156 .needs_file = 1,
1157 .hash_reg_file = 1,
1158 .unbound_nonreg_file = 1,
1159 .audit_skip = 1,
1160 },
1161 [IORING_OP_SHUTDOWN] = {
1162 .needs_file = 1,
1163 },
1164 [IORING_OP_RENAMEAT] = {},
1165 [IORING_OP_UNLINKAT] = {},
1166 [IORING_OP_MKDIRAT] = {},
1167 [IORING_OP_SYMLINKAT] = {},
1168 [IORING_OP_LINKAT] = {},
1169 [IORING_OP_MSG_RING] = {
1170 .needs_file = 1,
1171 },
1172};
1173
1174/* requests with any of those set should undergo io_disarm_next() */
1175#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
1176
1177static bool io_disarm_next(struct io_kiocb *req);
1178static void io_uring_del_tctx_node(unsigned long index);
1179static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
1180 struct task_struct *task,
1181 bool cancel_all);
1182static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
1183
1184static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);
1185
1186static void io_put_req(struct io_kiocb *req);
1187static void io_put_req_deferred(struct io_kiocb *req);
1188static void io_dismantle_req(struct io_kiocb *req);
1189static void io_queue_linked_timeout(struct io_kiocb *req);
1190static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
1191 struct io_uring_rsrc_update2 *up,
1192 unsigned nr_args);
1193static void io_clean_op(struct io_kiocb *req);
1194static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
1195 unsigned issue_flags);
1196static inline struct file *io_file_get_normal(struct io_kiocb *req, int fd);
1197static void io_drop_inflight_file(struct io_kiocb *req);
1198static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags);
1199static void __io_queue_sqe(struct io_kiocb *req);
1200static void io_rsrc_put_work(struct work_struct *work);
1201
1202static void io_req_task_queue(struct io_kiocb *req);
1203static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
1204static int io_req_prep_async(struct io_kiocb *req);
1205
1206static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
1207 unsigned int issue_flags, u32 slot_index);
1208static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
1209
1210static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
1211static void io_eventfd_signal(struct io_ring_ctx *ctx);
1212
1213static struct kmem_cache *req_cachep;
1214
1215static const struct file_operations io_uring_fops;
1216
1217struct sock *io_uring_get_socket(struct file *file)
1218{
1219#if defined(CONFIG_UNIX)
1220 if (file->f_op == &io_uring_fops) {
1221 struct io_ring_ctx *ctx = file->private_data;
1222
1223 return ctx->ring_sock->sk;
1224 }
1225#endif
1226 return NULL;
1227}
1228EXPORT_SYMBOL(io_uring_get_socket);
1229
1230#if defined(CONFIG_UNIX)
1231static inline bool io_file_need_scm(struct file *filp)
1232{
1233 return !!unix_get_socket(filp);
1234}
1235#else
1236static inline bool io_file_need_scm(struct file *filp)
1237{
1238 return 0;
1239}
1240#endif
1241
1242static void io_ring_submit_unlock(struct io_ring_ctx *ctx, unsigned issue_flags)
1243{
1244 lockdep_assert_held(&ctx->uring_lock);
1245 if (issue_flags & IO_URING_F_UNLOCKED)
1246 mutex_unlock(&ctx->uring_lock);
1247}
1248
1249static void io_ring_submit_lock(struct io_ring_ctx *ctx, unsigned issue_flags)
1250{
1251 /*
1252 * "Normal" inline submissions always hold the uring_lock, since we
1253 * grab it from the system call. Same is true for the SQPOLL offload.
1254 * The only exception is when we've detached the request and issue it
1255 * from an async worker thread, grab the lock for that case.
1256 */
1257 if (issue_flags & IO_URING_F_UNLOCKED)
1258 mutex_lock(&ctx->uring_lock);
1259 lockdep_assert_held(&ctx->uring_lock);
1260}
1261
1262static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
1263{
1264 if (!*locked) {
1265 mutex_lock(&ctx->uring_lock);
1266 *locked = true;
1267 }
1268}
1269
1270#define io_for_each_link(pos, head) \
1271 for (pos = (head); pos; pos = pos->link)
1272
1273/*
1274 * Shamelessly stolen from the mm implementation of page reference checking,
1275 * see commit f958d7b528b1 for details.
1276 */
1277#define req_ref_zero_or_close_to_overflow(req) \
1278 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
1279
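/*
 * The unsigned comparison above folds two checks into one: refs == 0 gives
 * 127 (caught), and any value within 127 of wrapping UINT_MAX wraps back
 * into [0, 126] (also caught), while sane positive counts stay above 127.
 */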
1280static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
1281{
1282 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
1283 return atomic_inc_not_zero(&req->refs);
1284}
1285
1286static inline bool req_ref_put_and_test(struct io_kiocb *req)
1287{
1288 if (likely(!(req->flags & REQ_F_REFCOUNT)))
1289 return true;
1290
1291 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1292 return atomic_dec_and_test(&req->refs);
1293}
1294
1295static inline void req_ref_get(struct io_kiocb *req)
1296{
1297 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
1298 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1299 atomic_inc(&req->refs);
1300}
1301
1302static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
1303{
1304 if (!wq_list_empty(&ctx->submit_state.compl_reqs))
1305 __io_submit_flush_completions(ctx);
1306}
1307
1308static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
1309{
1310 if (!(req->flags & REQ_F_REFCOUNT)) {
1311 req->flags |= REQ_F_REFCOUNT;
1312 atomic_set(&req->refs, nr);
1313 }
1314}
1315
1316static inline void io_req_set_refcount(struct io_kiocb *req)
1317{
1318 __io_req_set_refcount(req, 1);
1319}
1320
1321#define IO_RSRC_REF_BATCH 100
1322
1323static inline void io_req_put_rsrc_locked(struct io_kiocb *req,
1324 struct io_ring_ctx *ctx)
1325 __must_hold(&ctx->uring_lock)
1326{
1327 struct percpu_ref *ref = req->fixed_rsrc_refs;
1328
1329 if (ref) {
1330 if (ref == &ctx->rsrc_node->refs)
1331 ctx->rsrc_cached_refs++;
1332 else
1333 percpu_ref_put(ref);
1334 }
1335}
1336
1337static inline void io_req_put_rsrc(struct io_kiocb *req, struct io_ring_ctx *ctx)
1338{
1339 if (req->fixed_rsrc_refs)
1340 percpu_ref_put(req->fixed_rsrc_refs);
1341}
1342
1343static __cold void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
1344 __must_hold(&ctx->uring_lock)
1345{
1346 if (ctx->rsrc_cached_refs) {
1347 percpu_ref_put_many(&ctx->rsrc_node->refs, ctx->rsrc_cached_refs);
1348 ctx->rsrc_cached_refs = 0;
1349 }
1350}
1351
1352static void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
1353 __must_hold(&ctx->uring_lock)
1354{
1355 ctx->rsrc_cached_refs += IO_RSRC_REF_BATCH;
1356 percpu_ref_get_many(&ctx->rsrc_node->refs, IO_RSRC_REF_BATCH);
1357}
1358
1359static inline void io_req_set_rsrc_node(struct io_kiocb *req,
1360 struct io_ring_ctx *ctx,
1361 unsigned int issue_flags)
1362{
1363 if (!req->fixed_rsrc_refs) {
1364 req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
1365
1366 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1367 lockdep_assert_held(&ctx->uring_lock);
1368 ctx->rsrc_cached_refs--;
1369 if (unlikely(ctx->rsrc_cached_refs < 0))
1370 io_rsrc_refs_refill(ctx);
1371 } else {
1372 percpu_ref_get(req->fixed_rsrc_refs);
1373 }
1374 }
1375}
1376
1377static unsigned int __io_put_kbuf(struct io_kiocb *req, struct list_head *list)
1378{
1379 struct io_buffer *kbuf = req->kbuf;
1380 unsigned int cflags;
1381
1382 cflags = IORING_CQE_F_BUFFER | (kbuf->bid << IORING_CQE_BUFFER_SHIFT);
1383 req->flags &= ~REQ_F_BUFFER_SELECTED;
1384 list_add(&kbuf->list, list);
1385 req->kbuf = NULL;
1386 return cflags;
1387}
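/*
 * The application recovers the selected buffer from the CQE the same way,
 * e.g.:
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */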
1388
1389static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
1390{
1391 lockdep_assert_held(&req->ctx->completion_lock);
1392
1393 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
1394 return 0;
1395 return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
1396}
1397
1398static inline unsigned int io_put_kbuf(struct io_kiocb *req,
1399 unsigned issue_flags)
1400{
1401 unsigned int cflags;
1402
1403 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
1404 return 0;
1405
1406 /*
1407 * We can add this buffer back to two lists:
1408 *
1409 * 1) The io_buffers_cache list. This one is protected by the
1410 * ctx->uring_lock. If we already hold this lock, add back to this
1411 * list as we can grab it from issue as well.
1412 * 2) The io_buffers_comp list. This one is protected by the
1413 * ctx->completion_lock.
1414 *
1415 * We migrate buffers from the comp_list to the issue cache list
1416 * when we need one.
1417 */
1418 if (issue_flags & IO_URING_F_UNLOCKED) {
1419 struct io_ring_ctx *ctx = req->ctx;
1420
1421 spin_lock(&ctx->completion_lock);
1422 cflags = __io_put_kbuf(req, &ctx->io_buffers_comp);
1423 spin_unlock(&ctx->completion_lock);
1424 } else {
1425 lockdep_assert_held(&req->ctx->uring_lock);
1426
1427 cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache);
1428 }
1429
1430 return cflags;
1431}
1432
1433static struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
1434 unsigned int bgid)
1435{
1436 struct list_head *hash_list;
1437 struct io_buffer_list *bl;
1438
1439 hash_list = &ctx->io_buffers[hash_32(bgid, IO_BUFFERS_HASH_BITS)];
1440 list_for_each_entry(bl, hash_list, list)
1441 if (bl->bgid == bgid || bgid == -1U)
1442 return bl;
1443
1444 return NULL;
1445}
1446
1447static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
1448{
1449 struct io_ring_ctx *ctx = req->ctx;
1450 struct io_buffer_list *bl;
1451 struct io_buffer *buf;
1452
1453 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
1454 return;
1455 /* don't recycle if we already did IO to this buffer */
1456 if (req->flags & REQ_F_PARTIAL_IO)
1457 return;
1458
1459 io_ring_submit_lock(ctx, issue_flags);
1460
1461 buf = req->kbuf;
1462 bl = io_buffer_get_list(ctx, buf->bgid);
1463 list_add(&buf->list, &bl->buf_list);
1464 req->flags &= ~REQ_F_BUFFER_SELECTED;
1465 req->kbuf = NULL;
1466
1467 io_ring_submit_unlock(ctx, issue_flags);
1468}
1469
1470static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
1471 bool cancel_all)
1472 __must_hold(&req->ctx->timeout_lock)
1473{
1474 if (task && head->task != task)
1475 return false;
1476 return cancel_all;
1477}
1478
1479/*
1480 * As io_match_task() but protected against racing with linked timeouts.
1481 * User must not hold timeout_lock.
1482 */
1483static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
1484 bool cancel_all)
1485{
1486 if (task && head->task != task)
1487 return false;
1488 return cancel_all;
1489}
1490
1491static inline bool req_has_async_data(struct io_kiocb *req)
1492{
1493 return req->flags & REQ_F_ASYNC_DATA;
1494}
1495
1496static inline void req_set_fail(struct io_kiocb *req)
1497{
1498 req->flags |= REQ_F_FAIL;
1499 if (req->flags & REQ_F_CQE_SKIP) {
1500 req->flags &= ~REQ_F_CQE_SKIP;
1501 req->flags |= REQ_F_SKIP_LINK_CQES;
1502 }
1503}
1504
1505static inline void req_fail_link_node(struct io_kiocb *req, int res)
1506{
1507 req_set_fail(req);
1508 req->cqe.res = res;
1509}
1510
1511static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
1512{
1513 wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
1514}
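/*
 * Requests returned here are reused by the allocation fast path, which
 * pops from the same stack; a sketch of that consumer side:
 *
 *	struct io_wq_work_node *node = wq_stack_extract(&state->free_list);
 *	struct io_kiocb *req = container_of(node, struct io_kiocb, comp_list);
 */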
1515
1516static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
1517{
1518 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1519
1520 complete(&ctx->ref_comp);
1521}
1522
1523static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1524{
1525 return !req->timeout.off;
1526}
1527
1528static __cold void io_fallback_req_func(struct work_struct *work)
1529{
1530 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
1531 fallback_work.work);
1532 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
1533 struct io_kiocb *req, *tmp;
1534 bool locked = false;
1535
1536 percpu_ref_get(&ctx->refs);
1537 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
1538 req->io_task_work.func(req, &locked);
1539
1540 if (locked) {
1541 io_submit_flush_completions(ctx);
1542 mutex_unlock(&ctx->uring_lock);
1543 }
1544 percpu_ref_put(&ctx->refs);
1545}
1546
1547static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1548{
1549 struct io_ring_ctx *ctx;
1550 int i, hash_bits;
1551
1552 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1553 if (!ctx)
1554 return NULL;
1555
1556 /*
1557 * Use 5 bits less than the max cq entries, that should give us around
1558 * 32 entries per hash list if totally full and uniformly spread.
1559 */
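 /*
 * For example, cq_entries == 4096 gives ilog2() == 12, so 7 hash bits and
 * 128 buckets: about 32 entries per bucket when the ring is full.
 */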
1560 hash_bits = ilog2(p->cq_entries);
1561 hash_bits -= 5;
1562 if (hash_bits <= 0)
1563 hash_bits = 1;
1564 ctx->cancel_hash_bits = hash_bits;
1565 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1566 GFP_KERNEL);
1567 if (!ctx->cancel_hash)
1568 goto err;
1569 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1570
1571 ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
1572 if (!ctx->dummy_ubuf)
1573 goto err;
1574 /* set invalid range, so io_import_fixed() fails meeting it */
1575 ctx->dummy_ubuf->ubuf = -1UL;
1576
1577 ctx->io_buffers = kcalloc(1U << IO_BUFFERS_HASH_BITS,
1578 sizeof(struct list_head), GFP_KERNEL);
1579 if (!ctx->io_buffers)
1580 goto err;
1581 for (i = 0; i < (1U << IO_BUFFERS_HASH_BITS); i++)
1582 INIT_LIST_HEAD(&ctx->io_buffers[i]);
1583
1584 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
1585 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1586 goto err;
1587
1588 ctx->flags = p->flags;
1589 init_waitqueue_head(&ctx->sqo_sq_wait);
1590 INIT_LIST_HEAD(&ctx->sqd_list);
1591 INIT_LIST_HEAD(&ctx->cq_overflow_list);
1592 INIT_LIST_HEAD(&ctx->io_buffers_cache);
1593 INIT_LIST_HEAD(&ctx->apoll_cache);
1594 init_completion(&ctx->ref_comp);
1595 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
1596 mutex_init(&ctx->uring_lock);
1597 init_waitqueue_head(&ctx->cq_wait);
1598 spin_lock_init(&ctx->completion_lock);
1599 spin_lock_init(&ctx->timeout_lock);
1600 INIT_WQ_LIST(&ctx->iopoll_list);
1601 INIT_LIST_HEAD(&ctx->io_buffers_pages);
1602 INIT_LIST_HEAD(&ctx->io_buffers_comp);
1603 INIT_LIST_HEAD(&ctx->defer_list);
1604 INIT_LIST_HEAD(&ctx->timeout_list);
1605 INIT_LIST_HEAD(&ctx->ltimeout_list);
1606 spin_lock_init(&ctx->rsrc_ref_lock);
1607 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
1608 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1609 init_llist_head(&ctx->rsrc_put_llist);
1610 INIT_LIST_HEAD(&ctx->tctx_list);
1611 ctx->submit_state.free_list.next = NULL;
1612 INIT_WQ_LIST(&ctx->locked_free_list);
1613 INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
1614 INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
1615 return ctx;
1616err:
1617 kfree(ctx->dummy_ubuf);
1618 kfree(ctx->cancel_hash);
1619 kfree(ctx->io_buffers);
1620 kfree(ctx);
1621 return NULL;
1622}
1623
1624static void io_account_cq_overflow(struct io_ring_ctx *ctx)
1625{
1626 struct io_rings *r = ctx->rings;
1627
1628 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
1629 ctx->cq_extra--;
1630}
1631
1632static bool req_need_defer(struct io_kiocb *req, u32 seq)
1633{
1634 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1635 struct io_ring_ctx *ctx = req->ctx;
1636
1637 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
1638 }
1639
1640 return false;
1641}
1642
1643#define FFS_NOWAIT 0x1UL
1644#define FFS_ISREG 0x2UL
1645#define FFS_MASK ~(FFS_NOWAIT|FFS_ISREG)
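/*
 * These low bits are stashed alongside the pointer in
 * io_fixed_file.file_ptr; packing and unpacking is simply:
 *
 *	file_ptr = (unsigned long)file | FFS_ISREG | FFS_NOWAIT;
 *	file	 = (struct file *)(file_ptr & FFS_MASK);
 */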
1646
1647static inline bool io_req_ffs_set(struct io_kiocb *req)
1648{
1649 return req->flags & REQ_F_FIXED_FILE;
1650}
1651
1652static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
1653{
1654 if (WARN_ON_ONCE(!req->link))
1655 return NULL;
1656
1657 req->flags &= ~REQ_F_ARM_LTIMEOUT;
1658 req->flags |= REQ_F_LINK_TIMEOUT;
1659
1660 /* linked timeouts should have two refs once prep'ed */
1661 io_req_set_refcount(req);
1662 __io_req_set_refcount(req->link, 2);
1663 return req->link;
1664}
1665
1666static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
1667{
1668 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
1669 return NULL;
1670 return __io_prep_linked_timeout(req);
1671}
1672
1673static void io_prep_async_work(struct io_kiocb *req)
1674{
1675 const struct io_op_def *def = &io_op_defs[req->opcode];
1676 struct io_ring_ctx *ctx = req->ctx;
1677
1678 if (!(req->flags & REQ_F_CREDS)) {
1679 req->flags |= REQ_F_CREDS;
1680 req->creds = get_current_cred();
1681 }
1682
1683 req->work.list.next = NULL;
1684 req->work.flags = 0;
1685 if (req->flags & REQ_F_FORCE_ASYNC)
1686 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1687
1688 if (req->flags & REQ_F_ISREG) {
1689 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
1690 io_wq_hash_work(&req->work, file_inode(req->file));
1691 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
1692 if (def->unbound_nonreg_file)
1693 req->work.flags |= IO_WQ_WORK_UNBOUND;
1694 }
1695}
1696
1697static void io_prep_async_link(struct io_kiocb *req)
1698{
1699 struct io_kiocb *cur;
1700
1701 if (req->flags & REQ_F_LINK_TIMEOUT) {
1702 struct io_ring_ctx *ctx = req->ctx;
1703
1704 spin_lock_irq(&ctx->timeout_lock);
1705 io_for_each_link(cur, req)
1706 io_prep_async_work(cur);
1707 spin_unlock_irq(&ctx->timeout_lock);
1708 } else {
1709 io_for_each_link(cur, req)
1710 io_prep_async_work(cur);
1711 }
1712}
1713
1714static inline void io_req_add_compl_list(struct io_kiocb *req)
1715{
1716 struct io_submit_state *state = &req->ctx->submit_state;
1717
1718 if (!(req->flags & REQ_F_CQE_SKIP))
1719 state->flush_cqes = true;
1720 wq_list_add_tail(&req->comp_list, &state->compl_reqs);
1721}
1722
1723static void io_queue_async_work(struct io_kiocb *req, bool *dont_use)
1724{
1725 struct io_ring_ctx *ctx = req->ctx;
1726 struct io_kiocb *link = io_prep_linked_timeout(req);
1727 struct io_uring_task *tctx = req->task->io_uring;
1728
1729 BUG_ON(!tctx);
1730 BUG_ON(!tctx->io_wq);
1731
1732 /* init ->work of the whole link before punting */
1733 io_prep_async_link(req);
1734
1735 /*
1736 * Not expected to happen, but if we do have a bug where this _can_
1737 * happen, catch it here and ensure the request is marked as
1738 * canceled. That will make io-wq go through the usual work cancel
1739 * procedure rather than attempt to run this request (or create a new
1740 * worker for it).
1741 */
1742 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
1743 req->work.flags |= IO_WQ_WORK_CANCEL;
1744
cef216fc 1745 trace_io_uring_queue_async_work(ctx, req, req->cqe.user_data, req->opcode, req->flags,
502c87d6 1746 &req->work, io_wq_is_hashed(&req->work));
ebf93667 1747 io_wq_enqueue(tctx->io_wq, &req->work);
7271ef3a
JA
1748 if (link)
1749 io_queue_linked_timeout(link);
cbdcb435
PB
1750}
1751
1ee4160c 1752static void io_kill_timeout(struct io_kiocb *req, int status)
8c855885 1753 __must_hold(&req->ctx->completion_lock)
89850fce 1754 __must_hold(&req->ctx->timeout_lock)
5262f567 1755{
e8c2bc1f 1756 struct io_timeout_data *io = req->async_data;
5262f567 1757
fd9c7bc5 1758 if (hrtimer_try_to_cancel(&io->timer) != -1) {
2ae2eb9d
PB
1759 if (status)
1760 req_set_fail(req);
01cec8c1
PB
1761 atomic_set(&req->ctx->cq_timeouts,
1762 atomic_read(&req->ctx->cq_timeouts) + 1);
135fcde8 1763 list_del_init(&req->timeout.list);
913a571a 1764 io_fill_cqe_req(req, status, 0);
91c2f697 1765 io_put_req_deferred(req);
5262f567
JA
1766 }
1767}
1768
c072481d 1769static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
de0617e4 1770{
441b8a78 1771 while (!list_empty(&ctx->defer_list)) {
27dc8338
PB
1772 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1773 struct io_defer_entry, list);
de0617e4 1774
9cf7c104 1775 if (req_need_defer(de->req, de->seq))
04518945 1776 break;
27dc8338 1777 list_del_init(&de->list);
907d1df3 1778 io_req_task_queue(de->req);
27dc8338 1779 kfree(de);
441b8a78 1780 }
04518945
PB
1781}
1782
c072481d 1783static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
89850fce 1784 __must_hold(&ctx->completion_lock)
de0617e4 1785{
441b8a78 1786 u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
e677edbc 1787 struct io_kiocb *req, *tmp;
f010505b 1788
79ebeaee 1789 spin_lock_irq(&ctx->timeout_lock);
e677edbc 1790 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
f010505b 1791 u32 events_needed, events_got;
de0617e4 1792
8eb7e2d0 1793 if (io_is_timeout_noseq(req))
360428f8 1794 break;
f010505b
MDG
1795
1796 /*
1797 * Since seq can easily wrap around over time, subtract
1798 * the last seq at which timeouts were flushed before comparing.
1799 * Assuming not more than 2^31-1 events have happened since,
1800 * these subtractions won't have wrapped, so we can check if
1801 * target is in [last_seq, current_seq] by comparing the two.
1802 */
1803 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1804 events_got = seq - ctx->cq_last_tm_flush;
1805 if (events_got < events_needed)
360428f8 1806 break;
bfe68a22 1807
1ee4160c 1808 io_kill_timeout(req, 0);
f18ee4cf 1809 }
f010505b 1810 ctx->cq_last_tm_flush = seq;
79ebeaee 1811 spin_unlock_irq(&ctx->timeout_lock);
360428f8 1812}
5262f567 1813
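/*
 * Worked example of the wraparound-safe comparison above; illustrative
 * only, not part of the original file. With u32 arithmetic and, say,
 * cq_last_tm_flush = 0xfffffff0, target_seq = 0x10, seq = 0x20:
 *
 *	events_needed = 0x10 - 0xfffffff0 = 0x20
 *	events_got    = 0x20 - 0xfffffff0 = 0x30
 *
 * events_got >= events_needed, so the timeout fires correctly even though
 * the raw counters wrapped past UINT_MAX in between.
 */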
9333f6b4
PB
1814static inline void io_commit_cqring(struct io_ring_ctx *ctx)
1815{
1816 /* order cqe stores with ring update */
1817 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
1818}
1819
9aa8dfde 1820static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
360428f8 1821{
9aa8dfde
PB
1822 if (ctx->off_timeout_used || ctx->drain_active) {
1823 spin_lock(&ctx->completion_lock);
1824 if (ctx->off_timeout_used)
1825 io_flush_timeouts(ctx);
1826 if (ctx->drain_active)
1827 io_queue_deferred(ctx);
1828 io_commit_cqring(ctx);
1829 spin_unlock(&ctx->completion_lock);
1830 }
1831 if (ctx->has_evfd)
1832 io_eventfd_signal(ctx);
de0617e4
JA
1833}
1834
90554200
JA
1835static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1836{
1837 struct io_rings *r = ctx->rings;
1838
a566c556 1839 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
90554200
JA
1840}
1841
888aae2e
PB
1842static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1843{
1844 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1845}
1846
d068b506 1847static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
2b188cc1 1848{
75b28aff 1849 struct io_rings *rings = ctx->rings;
ea5ab3b5 1850 unsigned tail, mask = ctx->cq_entries - 1;
2b188cc1 1851
115e12e5
SB
1852 /*
1853 * writes to the cq entry need to come after reading head; the
1854 * control dependency is enough as we're using WRITE_ONCE to
1855 * fill the cq entry
1856 */
a566c556 1857 if (__io_cqring_events(ctx) == ctx->cq_entries)
2b188cc1
JA
1858 return NULL;
1859
888aae2e 1860 tail = ctx->cached_cq_tail++;
ea5ab3b5 1861 return &rings->cqes[tail & mask];
2b188cc1
JA
1862}
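/*
 * Illustrative note, not part of the original file: cq_entries is rounded
 * up to a power of two at setup time, so "tail & mask" maps the
 * free-running 32-bit tail onto a ring slot without a modulo. E.g. with
 * cq_entries = 8 (mask = 7), cached_cq_tail = 9 selects &rings->cqes[1];
 * the tail keeps incrementing, only the array index wraps.
 */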
1863
77bc59b4 1864static void io_eventfd_signal(struct io_ring_ctx *ctx)
f2842ab5 1865{
77bc59b4
UA
1866 struct io_ev_fd *ev_fd;
1867
77bc59b4
UA
1868 rcu_read_lock();
1869 /*
1870	 * rcu_dereference ctx->io_ev_fd once and use it both for the check
1871	 * and for eventfd_signal
1872 */
1873 ev_fd = rcu_dereference(ctx->io_ev_fd);
1874
1875 /*
1876	 * Check again if ev_fd exists in case an io_eventfd_unregister call
1877 * completed between the NULL check of ctx->io_ev_fd at the start of
1878 * the function and rcu_read_lock.
1879 */
1880 if (unlikely(!ev_fd))
1881 goto out;
7e55a19c 1882 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
77bc59b4
UA
1883 goto out;
1884
c75312dd 1885 if (!ev_fd->eventfd_async || io_wq_current_is_worker())
77bc59b4 1886 eventfd_signal(ev_fd->cq_ev_fd, 1);
77bc59b4
UA
1887out:
1888 rcu_read_unlock();
f2842ab5
JA
1889}
1890
9aa8dfde
PB
1891static inline void io_cqring_wake(struct io_ring_ctx *ctx)
1892{
1893 /*
1894 * wake_up_all() may seem excessive, but io_wake_function() and
1895 * io_should_wake() handle the termination of the loop and only
1896 * wake as many waiters as we need to.
1897 */
1898 if (wq_has_sleeper(&ctx->cq_wait))
1899 wake_up_all(&ctx->cq_wait);
1900}
1901
2c5d763c
JA
1902/*
1903 * This should only get called when at least one event has been posted.
1904 * Some applications rely on the eventfd notification count only changing
1905	 * IFF a new CQE has been added to the CQ ring. There's no dependency on
1906	 * a 1:1 relationship between how many times this function is called (and
1907	 * hence the eventfd count) and the number of CQEs posted to the CQ ring.
1908 */
66fc25ca 1909static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
1d7bb1d5 1910{
9aa8dfde
PB
1911 if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
1912 ctx->has_evfd))
9333f6b4
PB
1913 __io_commit_cqring_flush(ctx);
1914
9aa8dfde 1915 io_cqring_wake(ctx);
1d7bb1d5
JA
1916}
1917
80c18e4a
PB
1918static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1919{
9aa8dfde
PB
1920 if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
1921 ctx->has_evfd))
9333f6b4
PB
1922 __io_commit_cqring_flush(ctx);
1923
9aa8dfde
PB
1924 if (ctx->flags & IORING_SETUP_SQPOLL)
1925 io_cqring_wake(ctx);
80c18e4a
PB
1926}
1927
c4a2ed72 1928/* Returns true if there are no backlogged entries after the flush */
6c2450ae 1929static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
1d7bb1d5 1930{
b18032bb 1931 bool all_flushed, posted;
1d7bb1d5 1932
a566c556 1933 if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
e23de15f 1934 return false;
1d7bb1d5 1935
b18032bb 1936 posted = false;
79ebeaee 1937 spin_lock(&ctx->completion_lock);
6c2450ae 1938 while (!list_empty(&ctx->cq_overflow_list)) {
d068b506 1939 struct io_uring_cqe *cqe = io_get_cqe(ctx);
6c2450ae 1940 struct io_overflow_cqe *ocqe;
e6c8aa9a 1941
1d7bb1d5
JA
1942 if (!cqe && !force)
1943 break;
6c2450ae
PB
1944 ocqe = list_first_entry(&ctx->cq_overflow_list,
1945 struct io_overflow_cqe, list);
1946 if (cqe)
1947 memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
1948 else
8f6ed49a
PB
1949 io_account_cq_overflow(ctx);
1950
b18032bb 1951 posted = true;
6c2450ae
PB
1952 list_del(&ocqe->list);
1953 kfree(ocqe);
1d7bb1d5
JA
1954 }
1955
09e88404
PB
1956 all_flushed = list_empty(&ctx->cq_overflow_list);
1957 if (all_flushed) {
5ed7a37d 1958 clear_bit(0, &ctx->check_cq_overflow);
20c0b380
NA
1959 WRITE_ONCE(ctx->rings->sq_flags,
1960 ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
09e88404 1961 }
46930143 1962
60053be8 1963 io_commit_cqring(ctx);
79ebeaee 1964 spin_unlock(&ctx->completion_lock);
b18032bb
JA
1965 if (posted)
1966 io_cqring_ev_posted(ctx);
09e88404 1967 return all_flushed;
1d7bb1d5
JA
1968}
1969
90f67366 1970static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
6c503150 1971{
ca0a2651
JA
1972 bool ret = true;
1973
5ed7a37d 1974 if (test_bit(0, &ctx->check_cq_overflow)) {
6c503150
PB
1975 /* iopoll syncs against uring_lock, not completion_lock */
1976 if (ctx->flags & IORING_SETUP_IOPOLL)
1977 mutex_lock(&ctx->uring_lock);
90f67366 1978 ret = __io_cqring_overflow_flush(ctx, false);
6c503150
PB
1979 if (ctx->flags & IORING_SETUP_IOPOLL)
1980 mutex_unlock(&ctx->uring_lock);
1981 }
ca0a2651
JA
1982
1983 return ret;
6c503150
PB
1984}
1985
9d170164 1986static void __io_put_task(struct task_struct *task, int nr)
6a290a14
PB
1987{
1988 struct io_uring_task *tctx = task->io_uring;
1989
9d170164
PB
1990 percpu_counter_sub(&tctx->inflight, nr);
1991 if (unlikely(atomic_read(&tctx->in_idle)))
1992 wake_up(&tctx->wait);
1993 put_task_struct_many(task, nr);
1994}
1995
1996	/* must be called shortly after putting a request */
1997static inline void io_put_task(struct task_struct *task, int nr)
1998{
1999 if (likely(task == current))
2000 task->io_uring->cached_refs += nr;
2001 else
2002 __io_put_task(task, nr);
6a290a14
PB
2003}
2004
9a10867a
PB
2005static void io_task_refs_refill(struct io_uring_task *tctx)
2006{
2007 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
2008
2009 percpu_counter_add(&tctx->inflight, refill);
2010 refcount_add(refill, &current->usage);
2011 tctx->cached_refs += refill;
2012}
2013
2014static inline void io_get_task_refs(int nr)
2015{
2016 struct io_uring_task *tctx = current->io_uring;
2017
2018 tctx->cached_refs -= nr;
2019 if (unlikely(tctx->cached_refs < 0))
2020 io_task_refs_refill(tctx);
2021}
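/*
 * Worked example for the refill above; illustrative only, not part of the
 * original file. If a submission batch drives cached_refs down to -3,
 * io_task_refs_refill() charges
 *
 *	refill = -(-3) + IO_TCTX_REFS_CACHE_NR
 *
 * to the inflight counter and the task usage count in one go, which leaves
 * exactly IO_TCTX_REFS_CACHE_NR cached refs for later requests.
 */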
2022
3cc7fdb9
PB
2023static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
2024{
2025 struct io_uring_task *tctx = task->io_uring;
2026 unsigned int refs = tctx->cached_refs;
2027
2028 if (refs) {
2029 tctx->cached_refs = 0;
2030 percpu_counter_sub(&tctx->inflight, refs);
2031 put_task_struct_many(task, refs);
2032 }
2033}
2034
d4d19c19 2035static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
54daa9b2 2036 s32 res, u32 cflags)
2b188cc1 2037{
cce4b8b0 2038 struct io_overflow_cqe *ocqe;
2b188cc1 2039
cce4b8b0
PB
2040 ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
2041 if (!ocqe) {
2042 /*
2043 * If we're in ring overflow flush mode, or in task cancel mode,
2044 * or cannot allocate an overflow entry, then we need to drop it
2045 * on the floor.
2046 */
8f6ed49a 2047 io_account_cq_overflow(ctx);
cce4b8b0 2048 return false;
2b188cc1 2049 }
cce4b8b0 2050 if (list_empty(&ctx->cq_overflow_list)) {
5ed7a37d 2051 set_bit(0, &ctx->check_cq_overflow);
20c0b380
NA
2052 WRITE_ONCE(ctx->rings->sq_flags,
2053 ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
2054
cce4b8b0 2055 }
d4d19c19 2056 ocqe->cqe.user_data = user_data;
cce4b8b0
PB
2057 ocqe->cqe.res = res;
2058 ocqe->cqe.flags = cflags;
2059 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
2060 return true;
2b188cc1
JA
2061}
2062
ae4da189 2063static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
913a571a 2064 s32 res, u32 cflags)
2b188cc1
JA
2065{
2066 struct io_uring_cqe *cqe;
2067
2068 /*
2069 * If we can't get a cq entry, userspace overflowed the
2070 * submission (by quite a lot). Increment the overflow count in
2071 * the ring.
2072 */
d068b506 2073 cqe = io_get_cqe(ctx);
1d7bb1d5 2074 if (likely(cqe)) {
d4d19c19 2075 WRITE_ONCE(cqe->user_data, user_data);
2b188cc1 2076 WRITE_ONCE(cqe->res, res);
bcda7baa 2077 WRITE_ONCE(cqe->flags, cflags);
8d13326e 2078 return true;
2b188cc1 2079 }
d4d19c19 2080 return io_cqring_event_overflow(ctx, user_data, res, cflags);
2b188cc1
JA
2081}
2082
90e7c35f
PB
2083static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
2084 struct io_kiocb *req)
2085{
2086 struct io_uring_cqe *cqe;
2087
2088 trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
2089 req->cqe.res, req->cqe.flags);
2090
2091 /*
2092 * If we can't get a cq entry, userspace overflowed the
2093 * submission (by quite a lot). Increment the overflow count in
2094 * the ring.
2095 */
2096 cqe = io_get_cqe(ctx);
2097 if (likely(cqe)) {
2098 memcpy(cqe, &req->cqe, sizeof(*cqe));
2099 return true;
2100 }
2101 return io_cqring_event_overflow(ctx, req->cqe.user_data,
2102 req->cqe.res, req->cqe.flags);
2103}
2104
ae4da189 2105static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
d5ec1dfa 2106{
cef216fc
PB
2107 trace_io_uring_complete(req->ctx, req, req->cqe.user_data, res, cflags);
2108 return __io_fill_cqe(req->ctx, req->cqe.user_data, res, cflags);
d5ec1dfa
SR
2109}
2110
913a571a 2111static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
bcda7baa 2112{
04c76b41 2113 if (!(req->flags & REQ_F_CQE_SKIP))
ae4da189 2114 __io_fill_cqe_req(req, res, cflags);
bcda7baa
JA
2115}
2116
913a571a
PB
2117static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
2118 s32 res, u32 cflags)
bcda7baa 2119{
913a571a 2120 ctx->cq_extra++;
502c87d6 2121 trace_io_uring_complete(ctx, NULL, user_data, res, cflags);
ae4da189 2122 return __io_fill_cqe(ctx, user_data, res, cflags);
bcda7baa
JA
2123}
2124
a37fae8a
HX
2125static void __io_req_complete_post(struct io_kiocb *req, s32 res,
2126 u32 cflags)
2b188cc1 2127{
78e19bbe 2128 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 2129
04c76b41 2130 if (!(req->flags & REQ_F_CQE_SKIP))
ae4da189 2131 __io_fill_cqe_req(req, res, cflags);
c7dae4ba
JA
2132 /*
2133 * If we're the last reference to this request, add to our locked
2134 * free_list cache.
2135 */
de9b4cca 2136 if (req_ref_put_and_test(req)) {
7a612350 2137 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
0756a869 2138 if (req->flags & IO_DISARM_MASK)
7a612350
PB
2139 io_disarm_next(req);
2140 if (req->link) {
2141 io_req_task_queue(req->link);
2142 req->link = NULL;
2143 }
2144 }
ab409402 2145 io_req_put_rsrc(req, ctx);
8197b053
PB
2146 /*
2147 * Selected buffer deallocation in io_clean_op() assumes that
2148 * we don't hold ->completion_lock. Clean them here to avoid
2149 * deadlocks.
2150 */
2151 io_put_kbuf_comp(req);
c7dae4ba
JA
2152 io_dismantle_req(req);
2153 io_put_task(req->task, 1);
c2b6c6bc 2154 wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
d0acdee2 2155 ctx->locked_free_nr++;
180f829f 2156 }
a37fae8a
HX
2157}
2158
2159static void io_req_complete_post(struct io_kiocb *req, s32 res,
2160 u32 cflags)
2161{
2162 struct io_ring_ctx *ctx = req->ctx;
2163
2164 spin_lock(&ctx->completion_lock);
2165 __io_req_complete_post(req, res, cflags);
7a612350 2166 io_commit_cqring(ctx);
79ebeaee 2167 spin_unlock(&ctx->completion_lock);
a3f34907 2168 io_cqring_ev_posted(ctx);
4e3d9ff9
JA
2169}
2170
54daa9b2
PB
2171static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
2172 u32 cflags)
229a7b63 2173{
cef216fc
PB
2174 req->cqe.res = res;
2175 req->cqe.flags = cflags;
e342c807 2176 req->flags |= REQ_F_COMPLETE_INLINE;
e1e16097
JA
2177}
2178
889fca73 2179static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
54daa9b2 2180 s32 res, u32 cflags)
bcda7baa 2181{
889fca73
PB
2182 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
2183 io_req_complete_state(req, res, cflags);
a38d68db 2184 else
c7dae4ba 2185 io_req_complete_post(req, res, cflags);
bcda7baa
JA
2186}
2187
54daa9b2 2188static inline void io_req_complete(struct io_kiocb *req, s32 res)
0ddf92e8 2189{
889fca73 2190 __io_req_complete(req, 0, res, 0);
0ddf92e8
JA
2191}
2192
54daa9b2 2193static void io_req_complete_failed(struct io_kiocb *req, s32 res)
f41db273 2194{
93d2bcd2 2195 req_set_fail(req);
ab0ac095 2196 io_req_complete_post(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
f41db273
PB
2197}
2198
c6d3d9cb
PB
2199static void io_req_complete_fail_submit(struct io_kiocb *req)
2200{
2201 /*
2202	 * We don't submit; fail them all. For that, replace hardlinks with
2203	 * normal links. An extra REQ_F_LINK is tolerated.
2204 */
2205 req->flags &= ~REQ_F_HARDLINK;
2206 req->flags |= REQ_F_LINK;
cef216fc 2207 io_req_complete_failed(req, req->cqe.res);
c6d3d9cb
PB
2208}
2209
864ea921
PB
2210/*
2211 * Don't initialise the fields below on every allocation, but do that in
2212 * advance and keep them valid across allocations.
2213 */
2214static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
2215{
2216 req->ctx = ctx;
2217 req->link = NULL;
2218 req->async_data = NULL;
2219 /* not necessary, but safer to zero */
cef216fc 2220 req->cqe.res = 0;
864ea921
PB
2221}
2222
dac7a098 2223static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
cd0ca2e0 2224 struct io_submit_state *state)
dac7a098 2225{
79ebeaee 2226 spin_lock(&ctx->completion_lock);
c2b6c6bc 2227 wq_list_splice(&ctx->locked_free_list, &state->free_list);
d0acdee2 2228 ctx->locked_free_nr = 0;
79ebeaee 2229 spin_unlock(&ctx->completion_lock);
dac7a098
PB
2230}
2231
88ab95be
PB
2232static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
2233{
2234 return !ctx->submit_state.free_list.next;
2235}
2236
5d5901a3
PB
2237/*
2238 * A request might get retired back into the request caches even before opcode
2239 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
2240 * Because of that, io_alloc_req() should be called only under ->uring_lock
2241 * and with extra caution to not get a request that is still worked on.
2242 */
c072481d 2243static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
5d5901a3 2244 __must_hold(&ctx->uring_lock)
2b188cc1 2245{
864ea921 2246 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
3ab665b7 2247 void *reqs[IO_REQ_ALLOC_BATCH];
864ea921 2248 int ret, i;
e5d1bc0a 2249
23a5c43b
PB
2250 /*
2251 * If we have more than a batch's worth of requests in our IRQ side
2252 * locked cache, grab the lock and move them over to our submission
2253 * side cache.
2254 */
2255 if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH) {
2256 io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
88ab95be 2257 if (!io_req_cache_empty(ctx))
23a5c43b
PB
2258 return true;
2259 }
e5d1bc0a 2260
3ab665b7 2261 ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
fd6fab2c 2262
864ea921
PB
2263 /*
2264 * Bulk alloc is all-or-nothing. If we fail to get a batch,
2265 * retry single alloc to be on the safe side.
2266 */
2267 if (unlikely(ret <= 0)) {
3ab665b7
PB
2268 reqs[0] = kmem_cache_alloc(req_cachep, gfp);
2269 if (!reqs[0])
a33ae9ce 2270 return false;
864ea921 2271 ret = 1;
2b188cc1 2272 }
864ea921 2273
37f0e767 2274 percpu_ref_get_many(&ctx->refs, ret);
3ab665b7 2275 for (i = 0; i < ret; i++) {
23a5c43b 2276 struct io_kiocb *req = reqs[i];
3ab665b7
PB
2277
2278 io_preinit_req(req, ctx);
fa05457a 2279 io_req_add_to_cache(req, ctx);
3ab665b7 2280 }
a33ae9ce
PB
2281 return true;
2282}
2283
2284static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
2285{
88ab95be 2286 if (unlikely(io_req_cache_empty(ctx)))
a33ae9ce
PB
2287 return __io_alloc_req_refill(ctx);
2288 return true;
2289}
2290
2291static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
2292{
2293 struct io_wq_work_node *node;
2294
2295 node = wq_stack_extract(&ctx->submit_state.free_list);
c2b6c6bc 2296 return container_of(node, struct io_kiocb, comp_list);
2b188cc1
JA
2297}
2298
e1d767f0 2299static inline void io_put_file(struct file *file)
8da11c19 2300{
e1d767f0 2301 if (file)
8da11c19
PB
2302 fput(file);
2303}
2304
6b639522 2305static inline void io_dismantle_req(struct io_kiocb *req)
2b188cc1 2306{
094bae49 2307 unsigned int flags = req->flags;
929a3af9 2308
867f8fa5 2309 if (unlikely(flags & IO_REQ_CLEAN_FLAGS))
3a0a6902 2310 io_clean_op(req);
e1d767f0
PB
2311 if (!(flags & REQ_F_FIXED_FILE))
2312 io_put_file(req->file);
e65ef56d
JA
2313}
2314
c072481d 2315static __cold void __io_free_req(struct io_kiocb *req)
c6ca97b3 2316{
51a4cc11 2317 struct io_ring_ctx *ctx = req->ctx;
c6ca97b3 2318
ab409402 2319 io_req_put_rsrc(req, ctx);
216578e5 2320 io_dismantle_req(req);
7c660731 2321 io_put_task(req->task, 1);
c6ca97b3 2322
79ebeaee 2323 spin_lock(&ctx->completion_lock);
c2b6c6bc 2324 wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
c34b025f 2325 ctx->locked_free_nr++;
79ebeaee 2326 spin_unlock(&ctx->completion_lock);
e65ef56d
JA
2327}
2328
f2f87370
PB
2329static inline void io_remove_next_linked(struct io_kiocb *req)
2330{
2331 struct io_kiocb *nxt = req->link;
2332
2333 req->link = nxt->link;
2334 nxt->link = NULL;
2335}
2336
33cc89a9
PB
2337static bool io_kill_linked_timeout(struct io_kiocb *req)
2338 __must_hold(&req->ctx->completion_lock)
89b263f6 2339 __must_hold(&req->ctx->timeout_lock)
2665abfd 2340{
33cc89a9 2341 struct io_kiocb *link = req->link;
f2f87370 2342
b97e736a 2343 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
c9abd7ad 2344 struct io_timeout_data *io = link->async_data;
7c86ffee 2345
f2f87370 2346 io_remove_next_linked(req);
90cd7e42 2347 link->timeout.head = NULL;
fd9c7bc5 2348 if (hrtimer_try_to_cancel(&io->timer) != -1) {
ef9dd637 2349 list_del(&link->timeout.list);
04c76b41 2350 /* leave REQ_F_CQE_SKIP to io_fill_cqe_req */
913a571a 2351 io_fill_cqe_req(link, -ECANCELED, 0);
91c2f697 2352 io_put_req_deferred(link);
d4729fbd 2353 return true;
c9abd7ad
PB
2354 }
2355 }
d4729fbd 2356 return false;
7c86ffee
PB
2357}
2358
d148ca4b 2359static void io_fail_links(struct io_kiocb *req)
33cc89a9 2360 __must_hold(&req->ctx->completion_lock)
9e645e11 2361{
33cc89a9 2362 struct io_kiocb *nxt, *link = req->link;
04c76b41 2363 bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;
9e645e11 2364
f2f87370 2365 req->link = NULL;
f2f87370 2366 while (link) {
a8295b98
HX
2367 long res = -ECANCELED;
2368
2369 if (link->flags & REQ_F_FAIL)
cef216fc 2370 res = link->cqe.res;
a8295b98 2371
f2f87370
PB
2372 nxt = link->link;
2373 link->link = NULL;
2665abfd 2374
cef216fc 2375 trace_io_uring_fail_link(req->ctx, req, req->cqe.user_data,
502c87d6
SR
2376 req->opcode, link);
2377
04c76b41
PB
2378 if (!ignore_cqes) {
2379 link->flags &= ~REQ_F_CQE_SKIP;
2380 io_fill_cqe_req(link, res, 0);
2381 }
91c2f697 2382 io_put_req_deferred(link);
f2f87370 2383 link = nxt;
9e645e11 2384 }
33cc89a9 2385}
9e645e11 2386
33cc89a9
PB
2387static bool io_disarm_next(struct io_kiocb *req)
2388 __must_hold(&req->ctx->completion_lock)
2389{
2390 bool posted = false;
2391
0756a869
PB
2392 if (req->flags & REQ_F_ARM_LTIMEOUT) {
2393 struct io_kiocb *link = req->link;
2394
906c6caa 2395 req->flags &= ~REQ_F_ARM_LTIMEOUT;
0756a869
PB
2396 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
2397 io_remove_next_linked(req);
04c76b41 2398 /* leave REQ_F_CQE_SKIP to io_fill_cqe_req */
913a571a 2399 io_fill_cqe_req(link, -ECANCELED, 0);
0756a869
PB
2400 io_put_req_deferred(link);
2401 posted = true;
2402 }
2403 } else if (req->flags & REQ_F_LINK_TIMEOUT) {
89b263f6
JA
2404 struct io_ring_ctx *ctx = req->ctx;
2405
2406 spin_lock_irq(&ctx->timeout_lock);
33cc89a9 2407 posted = io_kill_linked_timeout(req);
89b263f6
JA
2408 spin_unlock_irq(&ctx->timeout_lock);
2409 }
93d2bcd2 2410 if (unlikely((req->flags & REQ_F_FAIL) &&
e4335ed3 2411 !(req->flags & REQ_F_HARDLINK))) {
33cc89a9
PB
2412 posted |= (req->link != NULL);
2413 io_fail_links(req);
2414 }
2415 return posted;
9e645e11
JA
2416}
2417
d81499bf
PB
2418static void __io_req_find_next_prep(struct io_kiocb *req)
2419{
2420 struct io_ring_ctx *ctx = req->ctx;
2421 bool posted;
2422
2423 spin_lock(&ctx->completion_lock);
2424 posted = io_disarm_next(req);
60053be8 2425 io_commit_cqring(ctx);
d81499bf
PB
2426 spin_unlock(&ctx->completion_lock);
2427 if (posted)
2428 io_cqring_ev_posted(ctx);
2429}
2430
2431static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
c69f8dbe 2432{
33cc89a9 2433 struct io_kiocb *nxt;
944e58bf 2434
9e645e11
JA
2435 /*
2436 * If LINK is set, we have dependent requests in this chain. If we
2437 * didn't fail this request, queue the first one up, moving any other
2438 * dependencies to the next request. In case of failure, fail the rest
2439 * of the chain.
2440 */
d81499bf
PB
2441 if (unlikely(req->flags & IO_DISARM_MASK))
2442 __io_req_find_next_prep(req);
33cc89a9
PB
2443 nxt = req->link;
2444 req->link = NULL;
2445 return nxt;
4d7dd462 2446}
9e645e11 2447
f237c30a 2448static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
2c32395d
PB
2449{
2450 if (!ctx)
2451 return;
f237c30a 2452 if (*locked) {
c450178d 2453 io_submit_flush_completions(ctx);
2c32395d 2454 mutex_unlock(&ctx->uring_lock);
f237c30a 2455 *locked = false;
2c32395d
PB
2456 }
2457 percpu_ref_put(&ctx->refs);
2458}
2459
f28c240e
HX
2460static inline void ctx_commit_and_unlock(struct io_ring_ctx *ctx)
2461{
2462 io_commit_cqring(ctx);
2463 spin_unlock(&ctx->completion_lock);
2464 io_cqring_ev_posted(ctx);
2465}
2466
2467static void handle_prev_tw_list(struct io_wq_work_node *node,
2468 struct io_ring_ctx **ctx, bool *uring_locked)
2469{
2470 if (*ctx && !*uring_locked)
2471 spin_lock(&(*ctx)->completion_lock);
2472
2473 do {
2474 struct io_wq_work_node *next = node->next;
2475 struct io_kiocb *req = container_of(node, struct io_kiocb,
2476 io_task_work.node);
2477
34d2bfe7
JA
2478 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
2479
f28c240e
HX
2480 if (req->ctx != *ctx) {
2481 if (unlikely(!*uring_locked && *ctx))
2482 ctx_commit_and_unlock(*ctx);
2483
2484 ctx_flush_and_put(*ctx, uring_locked);
2485 *ctx = req->ctx;
2486 /* if not contended, grab and improve batching */
2487 *uring_locked = mutex_trylock(&(*ctx)->uring_lock);
2488 percpu_ref_get(&(*ctx)->refs);
2489 if (unlikely(!*uring_locked))
2490 spin_lock(&(*ctx)->completion_lock);
2491 }
2492 if (likely(*uring_locked))
2493 req->io_task_work.func(req, uring_locked);
2494 else
cef216fc 2495 __io_req_complete_post(req, req->cqe.res,
cc3cec83 2496 io_put_kbuf_comp(req));
f28c240e
HX
2497 node = next;
2498 } while (node);
2499
2500 if (unlikely(!*uring_locked))
2501 ctx_commit_and_unlock(*ctx);
2502}
2503
2504static void handle_tw_list(struct io_wq_work_node *node,
2505 struct io_ring_ctx **ctx, bool *locked)
9f8d032a
HX
2506{
2507 do {
2508 struct io_wq_work_node *next = node->next;
2509 struct io_kiocb *req = container_of(node, struct io_kiocb,
2510 io_task_work.node);
2511
34d2bfe7
JA
2512 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
2513
9f8d032a
HX
2514 if (req->ctx != *ctx) {
2515 ctx_flush_and_put(*ctx, locked);
2516 *ctx = req->ctx;
2517 /* if not contended, grab and improve batching */
2518 *locked = mutex_trylock(&(*ctx)->uring_lock);
2519 percpu_ref_get(&(*ctx)->refs);
2520 }
2521 req->io_task_work.func(req, locked);
2522 node = next;
2523 } while (node);
2524}
2525
7cbf1722 2526static void tctx_task_work(struct callback_head *cb)
c40f6379 2527{
f28c240e 2528 bool uring_locked = false;
ebd0df2e 2529 struct io_ring_ctx *ctx = NULL;
3f18407d
PB
2530 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
2531 task_work);
c40f6379 2532
16f72070 2533 while (1) {
f28c240e 2534 struct io_wq_work_node *node1, *node2;
3f18407d
PB
2535
2536 spin_lock_irq(&tctx->task_lock);
f28c240e
HX
2537 node1 = tctx->prior_task_list.first;
2538 node2 = tctx->task_list.first;
3f18407d 2539 INIT_WQ_LIST(&tctx->task_list);
f28c240e
HX
2540 INIT_WQ_LIST(&tctx->prior_task_list);
2541 if (!node2 && !node1)
6294f368 2542 tctx->task_running = false;
3f18407d 2543 spin_unlock_irq(&tctx->task_lock);
f28c240e 2544 if (!node2 && !node1)
6294f368 2545 break;
3f18407d 2546
f28c240e
HX
2547 if (node1)
2548 handle_prev_tw_list(node1, &ctx, &uring_locked);
f28c240e
HX
2549 if (node2)
2550 handle_tw_list(node2, &ctx, &uring_locked);
7cbf1722 2551 cond_resched();
68ca8fc0
PB
2552
2553 if (!tctx->task_list.first &&
2554 !tctx->prior_task_list.first && uring_locked)
2555 io_submit_flush_completions(ctx);
3f18407d 2556 }
ebd0df2e 2557
f28c240e 2558 ctx_flush_and_put(ctx, &uring_locked);
3cc7fdb9
PB
2559
2560 /* relaxed read is enough as only the task itself sets ->in_idle */
2561 if (unlikely(atomic_read(&tctx->in_idle)))
2562 io_uring_drop_tctx_refs(current);
7cbf1722
JA
2563}
2564
4813c377 2565static void io_req_task_work_add(struct io_kiocb *req, bool priority)
7cbf1722 2566{
c15b79de 2567 struct task_struct *tsk = req->task;
7cbf1722 2568 struct io_uring_task *tctx = tsk->io_uring;
c15b79de 2569 enum task_work_notify_mode notify;
e09ee510 2570 struct io_wq_work_node *node;
0b81e80c 2571 unsigned long flags;
6294f368 2572 bool running;
7cbf1722
JA
2573
2574 WARN_ON_ONCE(!tctx);
2575
d5361233
JA
2576 io_drop_inflight_file(req);
2577
0b81e80c 2578 spin_lock_irqsave(&tctx->task_lock, flags);
4813c377
HX
2579 if (priority)
2580 wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
2581 else
2582 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
6294f368
PB
2583 running = tctx->task_running;
2584 if (!running)
2585 tctx->task_running = true;
0b81e80c 2586 spin_unlock_irqrestore(&tctx->task_lock, flags);
7cbf1722
JA
2587
2588 /* task_work already pending, we're done */
6294f368 2589 if (running)
e09ee510 2590 return;
7cbf1722 2591
c15b79de
PB
2592 /*
2593 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2594 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2595 * processing task_work. There's no reliable way to tell if TWA_RESUME
2596 * will do the job.
2597 */
2598 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
d97ec623
PB
2599 if (likely(!task_work_add(tsk, &tctx->task_work, notify))) {
2600 if (notify == TWA_NONE)
2601 wake_up_process(tsk);
e09ee510 2602 return;
c15b79de 2603 }
2215bed9 2604
0b81e80c 2605 spin_lock_irqsave(&tctx->task_lock, flags);
6294f368 2606 tctx->task_running = false;
4813c377 2607 node = wq_list_merge(&tctx->prior_task_list, &tctx->task_list);
0b81e80c 2608 spin_unlock_irqrestore(&tctx->task_lock, flags);
7cbf1722 2609
e09ee510
PB
2610 while (node) {
2611 req = container_of(node, struct io_kiocb, io_task_work.node);
2612 node = node->next;
2613 if (llist_add(&req->io_task_work.fallback_node,
2614 &req->ctx->fallback_llist))
2615 schedule_delayed_work(&req->ctx->fallback_work, 1);
2616 }
eab30c4d
PB
2617}
2618
f237c30a 2619static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
c40f6379 2620{
87ceb6a6 2621 struct io_ring_ctx *ctx = req->ctx;
c40f6379 2622
b18a1a45 2623 /* not needed for normal modes, but SQPOLL depends on it */
f237c30a 2624 io_tw_lock(ctx, locked);
cef216fc 2625 io_req_complete_failed(req, req->cqe.res);
c40f6379
JA
2626}
2627
f237c30a 2628static void io_req_task_submit(struct io_kiocb *req, bool *locked)
c40f6379
JA
2629{
2630 struct io_ring_ctx *ctx = req->ctx;
2631
f237c30a 2632 io_tw_lock(ctx, locked);
316319e8 2633 /* req->task == current here, checking PF_EXITING is safe */
af066f31 2634 if (likely(!(req->task->flags & PF_EXITING)))
c5eef2b9 2635 __io_queue_sqe(req);
81b6d05c 2636 else
2593553a 2637 io_req_complete_failed(req, -EFAULT);
c40f6379
JA
2638}
2639
2c4b8eb6 2640static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
c40f6379 2641{
cef216fc 2642 req->cqe.res = ret;
5b0a6acc 2643 req->io_task_work.func = io_req_task_cancel;
4813c377 2644 io_req_task_work_add(req, false);
c40f6379
JA
2645}
2646
2c4b8eb6 2647static void io_req_task_queue(struct io_kiocb *req)
a3df7698 2648{
5b0a6acc 2649 req->io_task_work.func = io_req_task_submit;
4813c377 2650 io_req_task_work_add(req, false);
a3df7698
PB
2651}
2652
773af691
JA
2653static void io_req_task_queue_reissue(struct io_kiocb *req)
2654{
2655 req->io_task_work.func = io_queue_async_work;
4813c377 2656 io_req_task_work_add(req, false);
773af691
JA
2657}
2658
57859f4d 2659static void io_queue_next(struct io_kiocb *req)
c69f8dbe 2660{
57859f4d 2661 struct io_kiocb *nxt = io_req_find_next(req);
944e58bf 2662
57859f4d
PB
2663 if (nxt)
2664 io_req_task_queue(nxt);
c69f8dbe
JL
2665}
2666
c3524383 2667static void io_free_req(struct io_kiocb *req)
7a743e22 2668{
c3524383
PB
2669 io_queue_next(req);
2670 __io_free_req(req);
2671}
8766dd51 2672
f237c30a
PB
2673static void io_free_req_work(struct io_kiocb *req, bool *locked)
2674{
2675 io_free_req(req);
2676}
2677
3aa83bfb 2678static void io_free_batch_list(struct io_ring_ctx *ctx,
1cce17ac 2679 struct io_wq_work_node *node)
3aa83bfb 2680 __must_hold(&ctx->uring_lock)
5af1d13e 2681{
d4b7a5ef 2682 struct task_struct *task = NULL;
37f0e767 2683 int task_refs = 0;
5af1d13e 2684
3aa83bfb
PB
2685 do {
2686 struct io_kiocb *req = container_of(node, struct io_kiocb,
2687 comp_list);
2d6500d4 2688
a538be5b
PB
2689 if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
2690 if (req->flags & REQ_F_REFCOUNT) {
2691 node = req->comp_list.next;
2692 if (!req_ref_put_and_test(req))
2693 continue;
2694 }
b605a7fa
PB
2695 if ((req->flags & REQ_F_POLLED) && req->apoll) {
2696 struct async_poll *apoll = req->apoll;
2697
2698 if (apoll->double_poll)
2699 kfree(apoll->double_poll);
2700 list_add(&apoll->poll.wait.entry,
2701 &ctx->apoll_cache);
2702 req->flags &= ~REQ_F_POLLED;
2703 }
57859f4d
PB
2704 if (req->flags & (REQ_F_LINK|REQ_F_HARDLINK))
2705 io_queue_next(req);
a538be5b
PB
2706 if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
2707 io_clean_op(req);
c1e53a69 2708 }
a538be5b
PB
2709 if (!(req->flags & REQ_F_FIXED_FILE))
2710 io_put_file(req->file);
2d6500d4 2711
ab409402 2712 io_req_put_rsrc_locked(req, ctx);
5af1d13e 2713
d4b7a5ef
PB
2714 if (req->task != task) {
2715 if (task)
2716 io_put_task(task, task_refs);
2717 task = req->task;
2718 task_refs = 0;
2719 }
2720 task_refs++;
c1e53a69 2721 node = req->comp_list.next;
fa05457a 2722 io_req_add_to_cache(req, ctx);
3aa83bfb 2723 } while (node);
d4b7a5ef 2724
d4b7a5ef
PB
2725 if (task)
2726 io_put_task(task, task_refs);
7a743e22
PB
2727}
2728
c450178d 2729static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
a141dd89 2730 __must_hold(&ctx->uring_lock)
905c172f 2731{
6f33b0bc 2732 struct io_wq_work_node *node, *prev;
cd0ca2e0 2733 struct io_submit_state *state = &ctx->submit_state;
905c172f 2734
3d4aeb9f
PB
2735 if (state->flush_cqes) {
2736 spin_lock(&ctx->completion_lock);
2737 wq_list_for_each(node, prev, &state->compl_reqs) {
2738 struct io_kiocb *req = container_of(node, struct io_kiocb,
6f33b0bc 2739 comp_list);
5182ed2e 2740
3d4aeb9f 2741 if (!(req->flags & REQ_F_CQE_SKIP))
90e7c35f 2742 __io_fill_cqe_req_filled(ctx, req);
3d4aeb9f
PB
2743 }
2744
2745 io_commit_cqring(ctx);
2746 spin_unlock(&ctx->completion_lock);
2747 io_cqring_ev_posted(ctx);
2748 state->flush_cqes = false;
905c172f 2749 }
5182ed2e 2750
1cce17ac 2751 io_free_batch_list(ctx, state->compl_reqs.first);
6f33b0bc 2752 INIT_WQ_LIST(&state->compl_reqs);
7a743e22
PB
2753}
2754
ba816ad6
JA
2755/*
2756 * Drop reference to request, return next in chain (if there is one) if this
2757 * was the last reference to this request.
2758 */
0d85035a 2759static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
e65ef56d 2760{
9b5f7bd9
PB
2761 struct io_kiocb *nxt = NULL;
2762
de9b4cca 2763 if (req_ref_put_and_test(req)) {
7819a1f6
PB
2764 if (unlikely(req->flags & (REQ_F_LINK|REQ_F_HARDLINK)))
2765 nxt = io_req_find_next(req);
4d7dd462 2766 __io_free_req(req);
2a44f467 2767 }
9b5f7bd9 2768 return nxt;
2b188cc1
JA
2769}
2770
0d85035a 2771static inline void io_put_req(struct io_kiocb *req)
e65ef56d 2772{
de9b4cca 2773 if (req_ref_put_and_test(req))
e65ef56d 2774 io_free_req(req);
2b188cc1
JA
2775}
2776
91c2f697 2777static inline void io_put_req_deferred(struct io_kiocb *req)
216578e5 2778{
91c2f697 2779 if (req_ref_put_and_test(req)) {
f237c30a 2780 req->io_task_work.func = io_free_req_work;
4813c377 2781 io_req_task_work_add(req, false);
543af3a1 2782 }
216578e5
PB
2783}
2784
6c503150 2785static unsigned io_cqring_events(struct io_ring_ctx *ctx)
a3a0e43f
JA
2786{
2787 /* See comment at the top of this file */
2788 smp_rmb();
e23de15f 2789 return __io_cqring_events(ctx);
a3a0e43f
JA
2790}
2791
fb5ccc98
PB
2792static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2793{
2794 struct io_rings *rings = ctx->rings;
2795
2796 /* make sure SQ entry isn't read before tail */
2797 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2798}
2799
4c6e277c
JA
2800static inline bool io_run_task_work(void)
2801{
7f62d40d 2802 if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) {
4c6e277c 2803 __set_current_state(TASK_RUNNING);
7c5d8fa6
EB
2804 clear_notify_signal();
2805 if (task_work_pending(current))
2806 task_work_run();
4c6e277c
JA
2807 return true;
2808 }
2809
2810 return false;
bcda7baa
JA
2811}
2812
5ba3c874 2813static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
def596e9 2814{
5eef4e87 2815 struct io_wq_work_node *pos, *start, *prev;
d729cf9a 2816 unsigned int poll_flags = BLK_POLL_NOSLEEP;
b688f11e 2817 DEFINE_IO_COMP_BATCH(iob);
5ba3c874 2818 int nr_events = 0;
def596e9
JA
2819
2820 /*
2821 * Only spin for completions if we don't have multiple devices hanging
87a115fb 2822 * off our complete list.
def596e9 2823 */
87a115fb 2824 if (ctx->poll_multi_queue || force_nonspin)
ef99b2d3 2825 poll_flags |= BLK_POLL_ONESHOT;
def596e9 2826
5eef4e87
PB
2827 wq_list_for_each(pos, start, &ctx->iopoll_list) {
2828 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
9adbd45d 2829 struct kiocb *kiocb = &req->rw.kiocb;
a2416e1e 2830 int ret;
def596e9
JA
2831
2832 /*
581f9810
BM
2833 * Move completed and retryable entries to our local lists.
2834 * If we find a request that requires polling, break out
2835 * and complete those lists first, if we have entries there.
def596e9 2836 */
e3f721e6 2837 if (READ_ONCE(req->iopoll_completed))
def596e9
JA
2838 break;
2839
b688f11e 2840 ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags);
a2416e1e
PB
2841 if (unlikely(ret < 0))
2842 return ret;
2843 else if (ret)
ef99b2d3 2844 poll_flags |= BLK_POLL_ONESHOT;
def596e9 2845
3aadc23e 2846 /* iopoll may have completed current req */
b688f11e
JA
2847 if (!rq_list_empty(iob.req_list) ||
2848 READ_ONCE(req->iopoll_completed))
e3f721e6 2849 break;
def596e9
JA
2850 }
2851
b688f11e
JA
2852 if (!rq_list_empty(iob.req_list))
2853 iob.complete(&iob);
5eef4e87
PB
2854 else if (!pos)
2855 return 0;
def596e9 2856
5eef4e87
PB
2857 prev = start;
2858 wq_list_for_each_resume(pos, prev) {
2859 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
2860
b3fa03fd
PB
2861 /* order with io_complete_rw_iopoll(), e.g. ->result updates */
2862 if (!smp_load_acquire(&req->iopoll_completed))
e3f721e6 2863 break;
c0713540 2864 nr_events++;
83a13a41
PB
2865 if (unlikely(req->flags & REQ_F_CQE_SKIP))
2866 continue;
cef216fc 2867 __io_fill_cqe_req(req, req->cqe.res, io_put_kbuf(req, 0));
e3f721e6 2868 }
def596e9 2869
f5ed3bcd
PB
2870 if (unlikely(!nr_events))
2871 return 0;
2872
2873 io_commit_cqring(ctx);
2874 io_cqring_ev_posted_iopoll(ctx);
1cce17ac 2875 pos = start ? start->next : ctx->iopoll_list.first;
5eef4e87 2876 wq_list_cut(&ctx->iopoll_list, prev, start);
1cce17ac 2877 io_free_batch_list(ctx, pos);
5ba3c874 2878 return nr_events;
def596e9
JA
2879}
2880
def596e9
JA
2881/*
2882 * We can't just wait for polled events to come to us, we have to actively
2883 * find and complete them.
2884 */
c072481d 2885static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
def596e9
JA
2886{
2887 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2888 return;
2889
2890 mutex_lock(&ctx->uring_lock);
5eef4e87 2891 while (!wq_list_empty(&ctx->iopoll_list)) {
b2edc0a7 2892 /* let it sleep and repeat later if can't complete a request */
5ba3c874 2893 if (io_do_iopoll(ctx, true) == 0)
b2edc0a7 2894 break;
08f5439f
JA
2895 /*
2896		 * Ensure we allow local-to-the-cpu processing to take place;
2897		 * in this case we need to ensure that we reap all events.
3fcee5a6 2898		 * Also let task_work, etc. progress by releasing the mutex
08f5439f 2899 */
3fcee5a6
PB
2900 if (need_resched()) {
2901 mutex_unlock(&ctx->uring_lock);
2902 cond_resched();
2903 mutex_lock(&ctx->uring_lock);
2904 }
def596e9
JA
2905 }
2906 mutex_unlock(&ctx->uring_lock);
2907}
2908
7668b92a 2909static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
def596e9 2910{
7668b92a 2911 unsigned int nr_events = 0;
e9979b36 2912 int ret = 0;
500f9fba 2913
f39c8a5b
PB
2914 /*
2915 * Don't enter poll loop if we already have events pending.
2916 * If we do, we can potentially be spinning for commands that
2917	 * already triggered a CQE (e.g. in error).
2918 */
5ed7a37d 2919 if (test_bit(0, &ctx->check_cq_overflow))
f39c8a5b
PB
2920 __io_cqring_overflow_flush(ctx, false);
2921 if (io_cqring_events(ctx))
d487b43c 2922 return 0;
def596e9 2923 do {
500f9fba
JA
2924 /*
2925 * If a submit got punted to a workqueue, we can have the
2926 * application entering polling for a command before it gets
2927 * issued. That app will hold the uring_lock for the duration
2928 * of the poll right here, so we need to take a breather every
2929 * now and then to ensure that the issue has a chance to add
2930 * the poll to the issued list. Otherwise we can spin here
2931 * forever, while the workqueue is stuck trying to acquire the
2932 * very same mutex.
2933 */
5eef4e87 2934 if (wq_list_empty(&ctx->iopoll_list)) {
8f487ef2
PB
2935 u32 tail = ctx->cached_cq_tail;
2936
500f9fba 2937 mutex_unlock(&ctx->uring_lock);
4c6e277c 2938 io_run_task_work();
500f9fba 2939 mutex_lock(&ctx->uring_lock);
def596e9 2940
8f487ef2
PB
2941 /* some requests don't go through iopoll_list */
2942 if (tail != ctx->cached_cq_tail ||
5eef4e87 2943 wq_list_empty(&ctx->iopoll_list))
e9979b36 2944 break;
500f9fba 2945 }
5ba3c874
PB
2946 ret = io_do_iopoll(ctx, !min);
2947 if (ret < 0)
2948 break;
2949 nr_events += ret;
2950 ret = 0;
2951 } while (nr_events < min && !need_resched());
d487b43c 2952
def596e9
JA
2953 return ret;
2954}
2955
491381ce 2956static void kiocb_end_write(struct io_kiocb *req)
2b188cc1 2957{
491381ce
JA
2958 /*
2959 * Tell lockdep we inherited freeze protection from submission
2960 * thread.
2961 */
2962 if (req->flags & REQ_F_ISREG) {
1c98679d 2963 struct super_block *sb = file_inode(req->file)->i_sb;
2b188cc1 2964
1c98679d
PB
2965 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
2966 sb_end_write(sb);
2b188cc1
JA
2967 }
2968}
2969
b63534c4 2970#ifdef CONFIG_BLOCK
dc2a6e9a 2971static bool io_resubmit_prep(struct io_kiocb *req)
b63534c4 2972{
ab454438 2973 struct io_async_rw *rw = req->async_data;
b63534c4 2974
d886e185 2975 if (!req_has_async_data(req))
ab454438 2976 return !io_req_prep_async(req);
538941e2 2977 iov_iter_restore(&rw->s.iter, &rw->s.iter_state);
ab454438 2978 return true;
b63534c4 2979}
b63534c4 2980
3e6a0d3c 2981static bool io_rw_should_reissue(struct io_kiocb *req)
b63534c4 2982{
355afaeb 2983 umode_t mode = file_inode(req->file)->i_mode;
3e6a0d3c 2984 struct io_ring_ctx *ctx = req->ctx;
b63534c4 2985
355afaeb
JA
2986 if (!S_ISBLK(mode) && !S_ISREG(mode))
2987 return false;
3e6a0d3c
JA
2988 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2989 !(ctx->flags & IORING_SETUP_IOPOLL)))
b63534c4 2990 return false;
7c977a58
JA
2991 /*
2992 * If ref is dying, we might be running poll reap from the exit work.
2993 * Don't attempt to reissue from that path, just let it fail with
2994 * -EAGAIN.
2995 */
3e6a0d3c
JA
2996 if (percpu_ref_is_dying(&ctx->refs))
2997 return false;
ef046888
JA
2998 /*
2999	 * Play it safe and assume it's not safe to re-import and reissue if we're
3000 * not in the original thread group (or in task context).
3001 */
3002 if (!same_thread_group(req->task, current) || !in_task())
3003 return false;
3e6a0d3c
JA
3004 return true;
3005}
e82ad485 3006#else
a1ff1e3f 3007static bool io_resubmit_prep(struct io_kiocb *req)
e82ad485
JA
3008{
3009 return false;
3010}
e82ad485 3011static bool io_rw_should_reissue(struct io_kiocb *req)
3e6a0d3c 3012{
b63534c4
JA
3013 return false;
3014}
3e6a0d3c 3015#endif
b63534c4 3016
8ef12efe 3017static bool __io_complete_rw_common(struct io_kiocb *req, long res)
a1d7c393 3018{
f63cf519 3019 if (req->rw.kiocb.ki_flags & IOCB_WRITE) {
b65c128f 3020 kiocb_end_write(req);
f63cf519
JA
3021 fsnotify_modify(req->file);
3022 } else {
3023 fsnotify_access(req->file);
3024 }
cef216fc 3025 if (unlikely(res != req->cqe.res)) {
9532b99b
PB
3026 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
3027 io_rw_should_reissue(req)) {
3028 req->flags |= REQ_F_REISSUE;
8ef12efe 3029 return true;
9532b99b 3030 }
93d2bcd2 3031 req_set_fail(req);
cef216fc 3032 req->cqe.res = res;
9532b99b 3033 }
8ef12efe
JA
3034 return false;
3035}
3036
cc8e9ba7 3037static inline void io_req_task_complete(struct io_kiocb *req, bool *locked)
8ef12efe 3038{
cef216fc 3039 int res = req->cqe.res;
126180b9
PB
3040
3041 if (*locked) {
cc3cec83 3042 io_req_complete_state(req, res, io_put_kbuf(req, 0));
fff4e40e 3043 io_req_add_compl_list(req);
126180b9 3044 } else {
cc3cec83
JA
3045 io_req_complete_post(req, res,
3046 io_put_kbuf(req, IO_URING_F_UNLOCKED));
126180b9 3047 }
8ef12efe
JA
3048}
3049
00f6e68b 3050static void __io_complete_rw(struct io_kiocb *req, long res,
8ef12efe
JA
3051 unsigned int issue_flags)
3052{
3053 if (__io_complete_rw_common(req, res))
3054 return;
cef216fc 3055 __io_req_complete(req, issue_flags, req->cqe.res,
cc3cec83 3056 io_put_kbuf(req, issue_flags));
ba816ad6
JA
3057}
3058
6b19b766 3059static void io_complete_rw(struct kiocb *kiocb, long res)
ba816ad6 3060{
9adbd45d 3061 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
ba816ad6 3062
8ef12efe
JA
3063 if (__io_complete_rw_common(req, res))
3064 return;
cef216fc 3065 req->cqe.res = res;
8ef12efe 3066 req->io_task_work.func = io_req_task_complete;
f28c240e 3067 io_req_task_work_add(req, !!(req->ctx->flags & IORING_SETUP_SQPOLL));
2b188cc1
JA
3068}
3069
6b19b766 3070static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
def596e9 3071{
9adbd45d 3072 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
def596e9 3073
491381ce
JA
3074 if (kiocb->ki_flags & IOCB_WRITE)
3075 kiocb_end_write(req);
cef216fc 3076 if (unlikely(res != req->cqe.res)) {
b66ceaf3
PB
3077 if (res == -EAGAIN && io_rw_should_reissue(req)) {
3078 req->flags |= REQ_F_REISSUE;
3079 return;
9532b99b 3080 }
cef216fc 3081 req->cqe.res = res;
8c130827 3082 }
bbde017a 3083
b3fa03fd
PB
3084 /* order with io_iopoll_complete() checking ->iopoll_completed */
3085 smp_store_release(&req->iopoll_completed, 1);
def596e9
JA
3086}
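/*
 * Illustrative note, not part of the original file: the smp_store_release()
 * above pairs with the smp_load_acquire() of ->iopoll_completed in
 * io_do_iopoll(). Once the poller observes iopoll_completed == 1 it is also
 * guaranteed to see the req->cqe.res store made just before the release, so
 * the CQE it posts carries the final result.
 */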
3087
3088/*
3089 * After the iocb has been issued, it's safe to be found on the poll list.
3090 * Adding the kiocb to the list AFTER submission ensures that we don't
f39c8a5b 3091 * find it from an io_do_iopoll() thread before the issuer is done
def596e9
JA
3092 * accessing the kiocb cookie.
3093 */
9882131c 3094static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
def596e9
JA
3095{
3096 struct io_ring_ctx *ctx = req->ctx;
3b44b371 3097 const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
cb3d8972
PB
3098
3099 /* workqueue context doesn't hold uring_lock, grab it now */
3b44b371 3100 if (unlikely(needs_lock))
cb3d8972 3101 mutex_lock(&ctx->uring_lock);
def596e9
JA
3102
3103 /*
3104 * Track whether we have multiple files in our lists. This will impact
3105 * how we do polling eventually, not spinning if we're on potentially
3106 * different devices.
3107 */
5eef4e87 3108 if (wq_list_empty(&ctx->iopoll_list)) {
915b3dde
HX
3109 ctx->poll_multi_queue = false;
3110 } else if (!ctx->poll_multi_queue) {
def596e9
JA
3111 struct io_kiocb *list_req;
3112
5eef4e87
PB
3113 list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
3114 comp_list);
30da1b45 3115 if (list_req->file != req->file)
915b3dde 3116 ctx->poll_multi_queue = true;
def596e9
JA
3117 }
3118
3119 /*
3120 * For fast devices, IO may have already completed. If it has, add
3121 * it to the front so we find it first.
3122 */
65a6543d 3123 if (READ_ONCE(req->iopoll_completed))
5eef4e87 3124 wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
def596e9 3125 else
5eef4e87 3126 wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
bdcd3eab 3127
3b44b371 3128 if (unlikely(needs_lock)) {
cb3d8972
PB
3129 /*
3130		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
3131		 * in sq thread task context or in io worker task context. If
3132		 * the current task context is the sq thread, we don't need to
3133		 * check whether we should wake up the sq thread.
3134 */
3135 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
3136 wq_has_sleeper(&ctx->sq_data->wait))
3137 wake_up(&ctx->sq_data->wait);
3138
3139 mutex_unlock(&ctx->uring_lock);
3140 }
def596e9
JA
3141}
3142
4503b767
JA
3143static bool io_bdev_nowait(struct block_device *bdev)
3144{
9ba0d0c8 3145 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
4503b767
JA
3146}
3147
2b188cc1
JA
3148/*
3149 * If we tracked the file through the SCM inflight mechanism, we could support
3150 * any file. For now, just ensure that anything potentially problematic is done
3151 * inline.
3152 */
88459b50 3153static bool __io_file_supports_nowait(struct file *file, umode_t mode)
2b188cc1 3154{
4503b767 3155 if (S_ISBLK(mode)) {
4e7b5671
CH
3156 if (IS_ENABLED(CONFIG_BLOCK) &&
3157 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
4503b767
JA
3158 return true;
3159 return false;
3160 }
976517f1 3161 if (S_ISSOCK(mode))
2b188cc1 3162 return true;
4503b767 3163 if (S_ISREG(mode)) {
4e7b5671
CH
3164 if (IS_ENABLED(CONFIG_BLOCK) &&
3165 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
4503b767
JA
3166 file->f_op != &io_uring_fops)
3167 return true;
3168 return false;
3169 }
2b188cc1 3170
c5b85625
JA
3171 /* any ->read/write should understand O_NONBLOCK */
3172 if (file->f_flags & O_NONBLOCK)
3173 return true;
35645ac3 3174 return file->f_mode & FMODE_NOWAIT;
2b188cc1 3175}
c5b85625 3176
88459b50
PB
3177/*
3178 * If we tracked the file through the SCM inflight mechanism, we could support
3179 * any file. For now, just ensure that anything potentially problematic is done
3180 * inline.
3181 */
3182static unsigned int io_file_get_flags(struct file *file)
3183{
3184 umode_t mode = file_inode(file)->i_mode;
3185 unsigned int res = 0;
af197f50 3186
88459b50
PB
3187 if (S_ISREG(mode))
3188 res |= FFS_ISREG;
3189 if (__io_file_supports_nowait(file, mode))
3190 res |= FFS_NOWAIT;
3191 return res;
2b188cc1
JA
3192}
3193
35645ac3 3194static inline bool io_file_supports_nowait(struct io_kiocb *req)
7b29f92d 3195{
88459b50 3196 return req->flags & REQ_F_SUPPORT_NOWAIT;
7b29f92d
JA
3197}
3198
b9a6b8f9 3199static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2b188cc1 3200{
9adbd45d 3201 struct kiocb *kiocb = &req->rw.kiocb;
09bb8394
JA
3202 unsigned ioprio;
3203 int ret;
2b188cc1 3204
2b188cc1 3205 kiocb->ki_pos = READ_ONCE(sqe->off);
9adbd45d 3206
fb27274a
PB
3207 ioprio = READ_ONCE(sqe->ioprio);
3208 if (ioprio) {
3209 ret = ioprio_check_cap(ioprio);
3210 if (ret)
3211 return ret;
3212
3213 kiocb->ki_ioprio = ioprio;
3214 } else {
3215 kiocb->ki_ioprio = get_current_ioprio();
eae071c9
PB
3216 }
3217
578c0ee2 3218 req->imu = NULL;
3529d8c2
JA
3219 req->rw.addr = READ_ONCE(sqe->addr);
3220 req->rw.len = READ_ONCE(sqe->len);
584b0180 3221 req->rw.flags = READ_ONCE(sqe->rw_flags);
4f4eeba8 3222 req->buf_index = READ_ONCE(sqe->buf_index);
2b188cc1 3223 return 0;
2b188cc1
JA
3224}
3225
3226static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
3227{
3228 switch (ret) {
3229 case -EIOCBQUEUED:
3230 break;
3231 case -ERESTARTSYS:
3232 case -ERESTARTNOINTR:
3233 case -ERESTARTNOHAND:
3234 case -ERESTART_RESTARTBLOCK:
3235 /*
3236 * We can't just restart the syscall, since previously
3237 * submitted sqes may already be in progress. Just fail this
3238 * IO with EINTR.
3239 */
3240 ret = -EINTR;
df561f66 3241 fallthrough;
2b188cc1 3242 default:
6b19b766 3243 kiocb->ki_complete(kiocb, ret);
2b188cc1
JA
3244 }
3245}
3246
b4aec400 3247static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
d34e1e5b
DY
3248{
3249 struct kiocb *kiocb = &req->rw.kiocb;
3250
6f83ab22
JA
3251 if (kiocb->ki_pos != -1)
3252 return &kiocb->ki_pos;
3253
3254 if (!(req->file->f_mode & FMODE_STREAM)) {
3255 req->flags |= REQ_F_CUR_POS;
3256 kiocb->ki_pos = req->file->f_pos;
3257 return &kiocb->ki_pos;
d34e1e5b 3258 }
6f83ab22
JA
3259
3260 kiocb->ki_pos = 0;
3261 return NULL;
d34e1e5b
DY
3262}
3263
2ea537ca 3264static void kiocb_done(struct io_kiocb *req, ssize_t ret,
889fca73 3265 unsigned int issue_flags)
ba816ad6 3266{
e8c2bc1f 3267 struct io_async_rw *io = req->async_data;
ba04291e 3268
227c0c96 3269 /* add previously done IO, if any */
d886e185 3270 if (req_has_async_data(req) && io->bytes_done > 0) {
227c0c96 3271 if (ret < 0)
e8c2bc1f 3272 ret = io->bytes_done;
227c0c96 3273 else
e8c2bc1f 3274 ret += io->bytes_done;
227c0c96
JA
3275 }
3276
ba04291e 3277 if (req->flags & REQ_F_CUR_POS)
2ea537ca
PB
3278 req->file->f_pos = req->rw.kiocb.ki_pos;
3279 if (ret >= 0 && (req->rw.kiocb.ki_complete == io_complete_rw))
00f6e68b 3280 __io_complete_rw(req, ret, issue_flags);
ba816ad6 3281 else
2ea537ca 3282 io_rw_done(&req->rw.kiocb, ret);
97284637 3283
b66ceaf3 3284 if (req->flags & REQ_F_REISSUE) {
97284637 3285 req->flags &= ~REQ_F_REISSUE;
b91ef187 3286 if (io_resubmit_prep(req))
773af691 3287 io_req_task_queue_reissue(req);
b91ef187
PB
3288 else
3289 io_req_task_queue_fail(req, ret);
97284637 3290 }
ba816ad6
JA
3291}
3292
eae071c9
PB
3293static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
3294 struct io_mapped_ubuf *imu)
edafccee 3295{
9adbd45d 3296 size_t len = req->rw.len;
75769e3f 3297 u64 buf_end, buf_addr = req->rw.addr;
edafccee 3298 size_t offset;
edafccee 3299
75769e3f 3300 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
edafccee
JA
3301 return -EFAULT;
3302 /* not inside the mapped region */
4751f53d 3303 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
edafccee
JA
3304 return -EFAULT;
3305
3306 /*
3307	 * May not be the start of the buffer; set the size appropriately
3308	 * and advance us to the beginning.
3309 */
3310 offset = buf_addr - imu->ubuf;
3311 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bd11b3a3
JA
3312
3313 if (offset) {
3314 /*
3315 * Don't use iov_iter_advance() here, as it's really slow for
3316 * using the latter parts of a big fixed buffer - it iterates
3317 * over each segment manually. We can cheat a bit here, because
3318 * we know that:
3319 *
3320 * 1) it's a BVEC iter, we set it up
3321 * 2) all bvecs are PAGE_SIZE in size, except potentially the
3322 * first and last bvec
3323 *
3324 * So just find our index, and adjust the iterator afterwards.
3325 * If the offset is within the first bvec (or the whole first
3326 * bvec, just use iov_iter_advance(). This makes it easier
3327 * since we can just skip the first segment, which may not
3328 * be PAGE_SIZE aligned.
3329 */
3330 const struct bio_vec *bvec = imu->bvec;
3331
3332 if (offset <= bvec->bv_len) {
3333 iov_iter_advance(iter, offset);
3334 } else {
3335 unsigned long seg_skip;
3336
3337 /* skip first vec */
3338 offset -= bvec->bv_len;
3339 seg_skip = 1 + (offset >> PAGE_SHIFT);
3340
3341 iter->bvec = bvec + seg_skip;
3342 iter->nr_segs -= seg_skip;
99c79f66 3343 iter->count -= bvec->bv_len + offset;
bd11b3a3 3344 iter->iov_offset = offset & ~PAGE_MASK;
bd11b3a3
JA
3345 }
3346 }
3347
847595de 3348 return 0;
edafccee
JA
3349}
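/*
 * Worked example for the seg_skip math above; illustrative only, not part
 * of the original file. Assuming 4K pages, a 4096-byte first bvec and
 * offset = 10000:
 *
 *	offset -= 4096;				-> 5904, past the first bvec
 *	seg_skip = 1 + (5904 >> PAGE_SHIFT);	-> 2, i.e. start at bvec[2]
 *	iov_offset = 5904 & ~PAGE_MASK;		-> 1808 bytes into bvec[2]
 *
 * which lands at absolute offset 8192 + 1808 = 10000 into the buffer.
 */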
3350
5106dd6e
JA
3351static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
3352 unsigned int issue_flags)
eae071c9 3353{
eae071c9
PB
3354 struct io_mapped_ubuf *imu = req->imu;
3355 u16 index, buf_index = req->buf_index;
3356
3357 if (likely(!imu)) {
578c0ee2
PB
3358 struct io_ring_ctx *ctx = req->ctx;
3359
eae071c9
PB
3360 if (unlikely(buf_index >= ctx->nr_user_bufs))
3361 return -EFAULT;
5106dd6e 3362 io_req_set_rsrc_node(req, ctx, issue_flags);
eae071c9
PB
3363 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
3364 imu = READ_ONCE(ctx->user_bufs[index]);
3365 req->imu = imu;
3366 }
3367 return __io_import_fixed(req, rw, iter, imu);
3368}
3369
dbc7d452
JA
3370static void io_buffer_add_list(struct io_ring_ctx *ctx,
3371 struct io_buffer_list *bl, unsigned int bgid)
3372{
3373 struct list_head *list;
3374
3375 list = &ctx->io_buffers[hash_32(bgid, IO_BUFFERS_HASH_BITS)];
3376 INIT_LIST_HEAD(&bl->buf_list);
3377 bl->bgid = bgid;
3378 list_add(&bl->list, list);
3379}
3380
bcda7baa 3381static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
51aac424 3382 int bgid, unsigned int issue_flags)
bcda7baa 3383{
30d51dd4 3384 struct io_buffer *kbuf = req->kbuf;
dbc7d452
JA
3385 struct io_ring_ctx *ctx = req->ctx;
3386 struct io_buffer_list *bl;
bcda7baa
JA
3387
3388 if (req->flags & REQ_F_BUFFER_SELECTED)
3389 return kbuf;
3390
f8929630 3391 io_ring_submit_lock(req->ctx, issue_flags);
bcda7baa 3392
dbc7d452
JA
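	/* pop the first queued buffer from this group, if any are available */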
3393 bl = io_buffer_get_list(ctx, bgid);
3394 if (bl && !list_empty(&bl->buf_list)) {
3395 kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
3396 list_del(&kbuf->list);
bcda7baa
JA
3397 if (*len > kbuf->len)
3398 *len = kbuf->len;
30d51dd4
PB
3399 req->flags |= REQ_F_BUFFER_SELECTED;
3400 req->kbuf = kbuf;
bcda7baa
JA
3401 } else {
3402 kbuf = ERR_PTR(-ENOBUFS);
3403 }
3404
f8929630 3405 io_ring_submit_unlock(req->ctx, issue_flags);
bcda7baa
JA
3406 return kbuf;
3407}
3408
4d954c25 3409static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
51aac424 3410 unsigned int issue_flags)
4d954c25
JA
3411{
3412 struct io_buffer *kbuf;
4f4eeba8 3413 u16 bgid;
4d954c25 3414
4f4eeba8 3415 bgid = req->buf_index;
51aac424 3416 kbuf = io_buffer_select(req, len, bgid, issue_flags);
4d954c25
JA
3417 if (IS_ERR(kbuf))
3418 return kbuf;
4d954c25
JA
3419 return u64_to_user_ptr(kbuf->addr);
3420}
3421
3422#ifdef CONFIG_COMPAT
3423static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
51aac424 3424 unsigned int issue_flags)
4d954c25
JA
3425{
3426 struct compat_iovec __user *uiov;
3427 compat_ssize_t clen;
3428 void __user *buf;
3429 ssize_t len;
3430
3431 uiov = u64_to_user_ptr(req->rw.addr);
3432 if (!access_ok(uiov, sizeof(*uiov)))
3433 return -EFAULT;
3434 if (__get_user(clen, &uiov->iov_len))
3435 return -EFAULT;
3436 if (clen < 0)
3437 return -EINVAL;
3438
3439 len = clen;
51aac424 3440 buf = io_rw_buffer_select(req, &len, issue_flags);
4d954c25
JA
3441 if (IS_ERR(buf))
3442 return PTR_ERR(buf);
3443 iov[0].iov_base = buf;
3444 iov[0].iov_len = (compat_size_t) len;
3445 return 0;
3446}
3447#endif
3448
3449static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
51aac424 3450 unsigned int issue_flags)
4d954c25
JA
3451{
3452 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3453 void __user *buf;
3454 ssize_t len;
3455
3456 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3457 return -EFAULT;
3458
3459 len = iov[0].iov_len;
3460 if (len < 0)
3461 return -EINVAL;
51aac424 3462 buf = io_rw_buffer_select(req, &len, issue_flags);
4d954c25
JA
3463 if (IS_ERR(buf))
3464 return PTR_ERR(buf);
3465 iov[0].iov_base = buf;
3466 iov[0].iov_len = len;
3467 return 0;
3468}
3469
3470static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
51aac424 3471 unsigned int issue_flags)
4d954c25 3472{
dddb3e26 3473 if (req->flags & REQ_F_BUFFER_SELECTED) {
30d51dd4 3474 struct io_buffer *kbuf = req->kbuf;
dddb3e26 3475
dddb3e26
JA
3476 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3477 iov[0].iov_len = kbuf->len;
4d954c25 3478 return 0;
dddb3e26 3479 }
dd201662 3480 if (req->rw.len != 1)
4d954c25
JA
3481 return -EINVAL;
3482
3483#ifdef CONFIG_COMPAT
3484 if (req->ctx->compat)
51aac424 3485 return io_compat_import(req, iov, issue_flags);
4d954c25
JA
3486#endif
3487
51aac424 3488 return __io_iov_buffer_select(req, iov, issue_flags);
4d954c25
JA
3489}
3490
caa8fe6e
PB
3491static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
3492 struct io_rw_state *s,
3493 unsigned int issue_flags)
2b188cc1 3494{
5e49c973 3495 struct iov_iter *iter = &s->iter;
847595de 3496 u8 opcode = req->opcode;
caa8fe6e 3497 struct iovec *iovec;
d1d681b0
PB
3498 void __user *buf;
3499 size_t sqe_len;
4d954c25 3500 ssize_t ret;
edafccee 3501
f3251183 3502 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
5106dd6e 3503 ret = io_import_fixed(req, rw, iter, issue_flags);
f3251183
PB
3504 if (ret)
3505 return ERR_PTR(ret);
3506 return NULL;
3507 }
2b188cc1 3508
bcda7baa 3509 /* buffer index only valid with fixed read/write, or buffer select */
d1d681b0 3510 if (unlikely(req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)))
caa8fe6e 3511 return ERR_PTR(-EINVAL);
9adbd45d 3512
d1d681b0
PB
3513 buf = u64_to_user_ptr(req->rw.addr);
3514 sqe_len = req->rw.len;
9adbd45d 3515
3a6820f2 3516 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
bcda7baa 3517 if (req->flags & REQ_F_BUFFER_SELECT) {
51aac424 3518 buf = io_rw_buffer_select(req, &sqe_len, issue_flags);
867a23ea 3519 if (IS_ERR(buf))
898df244 3520 return ERR_CAST(buf);
3f9d6441 3521 req->rw.len = sqe_len;
bcda7baa
JA
3522 }
3523
5e49c973 3524 ret = import_single_range(rw, buf, sqe_len, s->fast_iov, iter);
f3251183
PB
3525 if (ret)
3526 return ERR_PTR(ret);
3527 return NULL;
3a6820f2
JA
3528 }
3529
caa8fe6e 3530 iovec = s->fast_iov;
4d954c25 3531 if (req->flags & REQ_F_BUFFER_SELECT) {
caa8fe6e 3532 ret = io_iov_buffer_select(req, iovec, issue_flags);
f3251183
PB
3533 if (ret)
3534 return ERR_PTR(ret);
3535 iov_iter_init(iter, rw, iovec, 1, iovec->iov_len);
3536 return NULL;
4d954c25
JA
3537 }
3538
caa8fe6e 3539 ret = __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
89cd35c5 3540 req->ctx->compat);
caa8fe6e
PB
3541 if (unlikely(ret < 0))
3542 return ERR_PTR(ret);
3543 return iovec;
2b188cc1
JA
3544}
3545
5e49c973
PB
3546static inline int io_import_iovec(int rw, struct io_kiocb *req,
3547 struct iovec **iovec, struct io_rw_state *s,
3548 unsigned int issue_flags)
3549{
caa8fe6e
PB
3550 *iovec = __io_import_iovec(rw, req, s, issue_flags);
3551 if (unlikely(IS_ERR(*iovec)))
3552 return PTR_ERR(*iovec);
5e49c973 3553
5e49c973 3554 iov_iter_save_state(&s->iter, &s->iter_state);
caa8fe6e 3555 return 0;
2b188cc1
JA
3556}
3557
0fef9483
JA
3558static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3559{
5b09e37e 3560 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
0fef9483
JA
3561}
3562
31b51510 3563/*
32960613
JA
3564 * For files that don't have ->read_iter() and ->write_iter(), handle them
3565 * by looping over ->read() or ->write() manually.
31b51510 3566 */
4017eb91 3567static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
32960613 3568{
4017eb91
JA
3569 struct kiocb *kiocb = &req->rw.kiocb;
3570 struct file *file = req->file;
32960613 3571 ssize_t ret = 0;
af9c45ec 3572 loff_t *ppos;
32960613
JA
3573
3574 /*
3575 * Don't support polled IO through this interface, and we can't
3576 * support non-blocking either. For the latter, this just causes
3577 * the kiocb to be handled from an async context.
3578 */
3579 if (kiocb->ki_flags & IOCB_HIPRI)
3580 return -EOPNOTSUPP;
35645ac3
PB
3581 if ((kiocb->ki_flags & IOCB_NOWAIT) &&
3582 !(kiocb->ki_filp->f_flags & O_NONBLOCK))
32960613
JA
3583 return -EAGAIN;
3584
af9c45ec
DY
3585 ppos = io_kiocb_ppos(kiocb);
3586
32960613 3587 while (iov_iter_count(iter)) {
311ae9e1 3588 struct iovec iovec;
32960613
JA
3589 ssize_t nr;
3590
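		/*
		 * Registered buffers come in as a bvec iter; track progress via
		 * the original user address in req->rw.addr/len instead, since
		 * a bvec can't be passed to ->read()/->write() directly.
		 */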
311ae9e1
PB
3591 if (!iov_iter_is_bvec(iter)) {
3592 iovec = iov_iter_iovec(iter);
3593 } else {
4017eb91
JA
3594 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3595 iovec.iov_len = req->rw.len;
311ae9e1
PB
3596 }
3597
32960613
JA
3598 if (rw == READ) {
3599 nr = file->f_op->read(file, iovec.iov_base,
af9c45ec 3600 iovec.iov_len, ppos);
32960613
JA
3601 } else {
3602 nr = file->f_op->write(file, iovec.iov_base,
af9c45ec 3603 iovec.iov_len, ppos);
32960613
JA
3604 }
3605
3606 if (nr < 0) {
3607 if (!ret)
3608 ret = nr;
3609 break;
3610 }
5e929367 3611 ret += nr;
16c8d2df
JA
3612 if (!iov_iter_is_bvec(iter)) {
3613 iov_iter_advance(iter, nr);
3614 } else {
16c8d2df 3615 req->rw.addr += nr;
5e929367
JA
3616 req->rw.len -= nr;
3617 if (!req->rw.len)
3618 break;
16c8d2df 3619 }
32960613
JA
3620 if (nr != iovec.iov_len)
3621 break;
32960613
JA
3622 }
3623
3624 return ret;
3625}
3626
ff6165b2
JA
3627static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3628 const struct iovec *fast_iov, struct iov_iter *iter)
f67676d1 3629{
e8c2bc1f 3630 struct io_async_rw *rw = req->async_data;
b64e3444 3631
538941e2 3632 memcpy(&rw->s.iter, iter, sizeof(*iter));
afb87658 3633 rw->free_iovec = iovec;
227c0c96 3634 rw->bytes_done = 0;
ff6165b2 3635 /* can only be fixed buffers, no need to do anything */
9c3a205c 3636 if (iov_iter_is_bvec(iter))
ff6165b2 3637 return;
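	/*
	 * No heap iovec - the iter still points at the on-stack fast_iov,
	 * so redirect it to our persistent copy.
	 */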
b64e3444 3638 if (!iovec) {
ff6165b2
JA
3639 unsigned iov_off = 0;
3640
538941e2 3641 rw->s.iter.iov = rw->s.fast_iov;
ff6165b2
JA
3642 if (iter->iov != fast_iov) {
3643 iov_off = iter->iov - fast_iov;
538941e2 3644 rw->s.iter.iov += iov_off;
ff6165b2 3645 }
538941e2
PB
3646 if (rw->s.fast_iov != fast_iov)
3647 memcpy(rw->s.fast_iov + iov_off, fast_iov + iov_off,
45097dae 3648 sizeof(struct iovec) * iter->nr_segs);
99bc4c38
PB
3649 } else {
3650 req->flags |= REQ_F_NEED_CLEANUP;
f67676d1
JA
3651 }
3652}
3653
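/* allocate req->async_data for this opcode, returns true if allocation failed */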
8d4af685 3654static inline bool io_alloc_async_data(struct io_kiocb *req)
3d9932a8 3655{
e8c2bc1f
JA
3656 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3657 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
d886e185
PB
3658 if (req->async_data) {
3659 req->flags |= REQ_F_ASYNC_DATA;
3660 return false;
3661 }
3662 return true;
3d9932a8
XW
3663}
3664
ff6165b2 3665static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
c88598a9 3666 struct io_rw_state *s, bool force)
b7bb4f7d 3667{
26f0505a 3668 if (!force && !io_op_defs[req->opcode].needs_async_setup)
74566df3 3669 return 0;
d886e185 3670 if (!req_has_async_data(req)) {
cd658695
JA
3671 struct io_async_rw *iorw;
3672
6cb78689 3673 if (io_alloc_async_data(req)) {
6bf985dc 3674 kfree(iovec);
5d204bcf 3675 return -ENOMEM;
6bf985dc 3676 }
b7bb4f7d 3677
c88598a9 3678 io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
cd658695
JA
3679 iorw = req->async_data;
3680 /* we've copied and mapped the iter, ensure state is saved */
538941e2 3681 iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
5d204bcf 3682 }
b7bb4f7d 3683 return 0;
f67676d1
JA
3684}
3685
73debe68 3686static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
c3e330a4 3687{
e8c2bc1f 3688 struct io_async_rw *iorw = req->async_data;
5e49c973 3689 struct iovec *iov;
847595de 3690 int ret;
c3e330a4 3691
51aac424 3692 /* submission path, ->uring_lock should already be taken */
3b44b371 3693 ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
c3e330a4
PB
3694 if (unlikely(ret < 0))
3695 return ret;
3696
ab0b196c
PB
3697 iorw->bytes_done = 0;
3698 iorw->free_iovec = iov;
3699 if (iov)
3700 req->flags |= REQ_F_NEED_CLEANUP;
c3e330a4
PB
3701 return 0;
3702}
3703
c1dd91d1 3704/*
ffdc8dab 3705 * This is our waitqueue callback handler, registered through __folio_lock_async()
c1dd91d1
JA
3706	 * when we initially tried to do the IO with the iocb and armed our waitqueue.
3707 * This gets called when the page is unlocked, and we generally expect that to
3708 * happen when the page IO is completed and the page is now uptodate. This will
3709 * queue a task_work based retry of the operation, attempting to copy the data
3710 * again. If the latter fails because the page was NOT uptodate, then we will
3711 * do a thread based blocking retry of the operation. That's the unexpected
3712 * slow path.
3713 */
bcf5a063
JA
3714static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3715 int sync, void *arg)
3716{
3717 struct wait_page_queue *wpq;
3718 struct io_kiocb *req = wait->private;
bcf5a063 3719 struct wait_page_key *key = arg;
bcf5a063
JA
3720
3721 wpq = container_of(wait, struct wait_page_queue, wait);
3722
cdc8fcb4
LT
3723 if (!wake_page_match(wpq, key))
3724 return 0;
3725
c8d317aa 3726 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
bcf5a063 3727 list_del_init(&wait->entry);
921b9054 3728 io_req_task_queue(req);
bcf5a063
JA
3729 return 1;
3730}
3731
c1dd91d1
JA
3732/*
3733 * This controls whether a given IO request should be armed for async page
3734 * based retry. If we return false here, the request is handed to the async
3735 * worker threads for retry. If we're doing buffered reads on a regular file,
3736 * we prepare a private wait_page_queue entry and retry the operation. This
3737 * will either succeed because the page is now uptodate and unlocked, or it
3738 * will register a callback when the page is unlocked at IO completion. Through
3739 * that callback, io_uring uses task_work to setup a retry of the operation.
3740 * That retry will attempt the buffered read again. The retry will generally
3741 * succeed, or in rare cases where it fails, we then fall back to using the
3742 * async worker threads for a blocking retry.
3743 */
227c0c96 3744static bool io_rw_should_retry(struct io_kiocb *req)
f67676d1 3745{
e8c2bc1f
JA
3746 struct io_async_rw *rw = req->async_data;
3747 struct wait_page_queue *wait = &rw->wpq;
bcf5a063 3748 struct kiocb *kiocb = &req->rw.kiocb;
f67676d1 3749
bcf5a063
JA
3750 /* never retry for NOWAIT, we just complete with -EAGAIN */
3751 if (req->flags & REQ_F_NOWAIT)
3752 return false;
f67676d1 3753
227c0c96 3754 /* Only for buffered IO */
3b2a4439 3755 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
bcf5a063 3756 return false;
3b2a4439 3757
bcf5a063
JA
3758 /*
3759	 * just use poll if we can, and don't attempt the waitqueue retry if
3760	 * the fs doesn't support callback-based unlocks
3761 */
3762 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3763 return false;
f67676d1 3764
3b2a4439
JA
3765 wait->wait.func = io_async_buf_func;
3766 wait->wait.private = req;
3767 wait->wait.flags = 0;
3768 INIT_LIST_HEAD(&wait->wait.entry);
3769 kiocb->ki_flags |= IOCB_WAITQ;
c8d317aa 3770 kiocb->ki_flags &= ~IOCB_NOWAIT;
3b2a4439 3771 kiocb->ki_waitq = wait;
3b2a4439 3772 return true;
bcf5a063
JA
3773}
3774
aeab9506 3775static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
bcf5a063 3776{
607b6fb8 3777 if (likely(req->file->f_op->read_iter))
bcf5a063 3778 return call_read_iter(req->file, &req->rw.kiocb, iter);
2dd2111d 3779 else if (req->file->f_op->read)
4017eb91 3780 return loop_rw_iter(READ, req, iter);
2dd2111d
GH
3781 else
3782 return -EINVAL;
f67676d1
JA
3783}
3784
7db30437
ML
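/* regular files and block devices are expected to satisfy the full read */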
3785static bool need_read_all(struct io_kiocb *req)
3786{
3787 return req->flags & REQ_F_ISREG ||
3788 S_ISBLK(file_inode(req->file)->i_mode);
3789}
3790
584b0180
JA
3791static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
3792{
3793 struct kiocb *kiocb = &req->rw.kiocb;
3794 struct io_ring_ctx *ctx = req->ctx;
3795 struct file *file = req->file;
3796 int ret;
3797
3798 if (unlikely(!file || !(file->f_mode & mode)))
3799 return -EBADF;
3800
3801 if (!io_req_ffs_set(req))
3802 req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
3803
3804 kiocb->ki_flags = iocb_flags(file);
3805 ret = kiocb_set_rw_flags(kiocb, req->rw.flags);
3806 if (unlikely(ret))
3807 return ret;
3808
3809 /*
3810 * If the file is marked O_NONBLOCK, still allow retry for it if it
3811 * supports async. Otherwise it's impossible to use O_NONBLOCK files
3812	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
3813 */
3814 if ((kiocb->ki_flags & IOCB_NOWAIT) ||
3815 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
3816 req->flags |= REQ_F_NOWAIT;
3817
3818 if (ctx->flags & IORING_SETUP_IOPOLL) {
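		/* IOPOLL requires O_DIRECT and a file that implements ->iopoll() */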
3819 if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
3820 return -EOPNOTSUPP;
3821
3822 kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
3823 kiocb->ki_complete = io_complete_rw_iopoll;
3824 req->iopoll_completed = 0;
3825 } else {
3826 if (kiocb->ki_flags & IOCB_HIPRI)
3827 return -EINVAL;
3828 kiocb->ki_complete = io_complete_rw;
3829 }
3830
3831 return 0;
3832}
3833
889fca73 3834static int io_read(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1 3835{
607b6fb8 3836 struct io_rw_state __s, *s = &__s;
c88598a9 3837 struct iovec *iovec;
9adbd45d 3838 struct kiocb *kiocb = &req->rw.kiocb;
45d189c6 3839 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
d886e185 3840 struct io_async_rw *rw;
cd658695 3841 ssize_t ret, ret2;
b4aec400 3842 loff_t *ppos;
ff6165b2 3843
607b6fb8
PB
3844 if (!req_has_async_data(req)) {
3845 ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
3846 if (unlikely(ret < 0))
3847 return ret;
3848 } else {
2be2eb02
JA
3849 /*
3850 * Safe and required to re-import if we're using provided
3851 * buffers, as we dropped the selected one before retry.
3852 */
3853 if (req->flags & REQ_F_BUFFER_SELECT) {
3854 ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
3855 if (unlikely(ret < 0))
3856 return ret;
3857 }
3858
d886e185 3859 rw = req->async_data;
c88598a9 3860 s = &rw->s;
cd658695
JA
3861 /*
3862 * We come here from an earlier attempt, restore our state to
3863 * match in case it doesn't. It's cheap enough that we don't
3864 * need to make this conditional.
3865 */
c88598a9 3866 iov_iter_restore(&s->iter, &s->iter_state);
2846c481 3867 iovec = NULL;
2846c481 3868 }
584b0180 3869 ret = io_rw_init_file(req, FMODE_READ);
323b190b
JA
3870 if (unlikely(ret)) {
3871 kfree(iovec);
584b0180 3872 return ret;
323b190b 3873 }
cef216fc 3874 req->cqe.res = iov_iter_count(&s->iter);
2b188cc1 3875
607b6fb8
PB
3876 if (force_nonblock) {
3877 /* If the file doesn't support async, just async punt */
35645ac3 3878 if (unlikely(!io_file_supports_nowait(req))) {
607b6fb8
PB
3879 ret = io_setup_async_rw(req, iovec, s, true);
3880 return ret ?: -EAGAIN;
3881 }
a88fc400 3882 kiocb->ki_flags |= IOCB_NOWAIT;
607b6fb8
PB
3883 } else {
3884 /* Ensure we clear previously set non-block flag */
3885 kiocb->ki_flags &= ~IOCB_NOWAIT;
6713e7a6 3886 }
9e645e11 3887
b4aec400 3888 ppos = io_kiocb_update_pos(req);
d34e1e5b 3889
cef216fc 3890 ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
5ea5dd45
PB
3891 if (unlikely(ret)) {
3892 kfree(iovec);
3893 return ret;
3894 }
2b188cc1 3895
c88598a9 3896 ret = io_iter_do_read(req, &s->iter);
32960613 3897
230d50d4 3898 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
6ad7f233 3899 req->flags &= ~REQ_F_REISSUE;
9af177ee
JA
3900 /* if we can poll, just do that */
3901 if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
3902 return -EAGAIN;
eefdf30f
JA
3903 /* IOPOLL retry should happen for io-wq threads */
3904 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
f91daf56 3905 goto done;
75c668cd
PB
3906 /* no retry on NONBLOCK nor RWF_NOWAIT */
3907 if (req->flags & REQ_F_NOWAIT)
355afaeb 3908 goto done;
f38c7e3a 3909 ret = 0;
230d50d4
JA
3910 } else if (ret == -EIOCBQUEUED) {
3911 goto out_free;
cef216fc 3912 } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
7db30437 3913 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
7335e3bf 3914 /* read all, failed, already did sync or don't want to retry */
00d23d51 3915 goto done;
227c0c96
JA
3916 }
3917
cd658695
JA
3918 /*
3919 * Don't depend on the iter state matching what was consumed, or being
3920 * untouched in case of error. Restore it and we'll advance it
3921 * manually if we need to.
3922 */
c88598a9 3923 iov_iter_restore(&s->iter, &s->iter_state);
cd658695 3924
c88598a9 3925 ret2 = io_setup_async_rw(req, iovec, s, true);
6bf985dc
PB
3926 if (ret2)
3927 return ret2;
3928
fe1cdd55 3929 iovec = NULL;
e8c2bc1f 3930 rw = req->async_data;
c88598a9 3931 s = &rw->s;
cd658695
JA
3932 /*
3933 * Now use our persistent iterator and state, if we aren't already.
3934 * We've restored and mapped the iter to match.
3935 */
227c0c96 3936
b23df91b 3937 do {
cd658695
JA
3938 /*
3939 * We end up here because of a partial read, either from
3940 * above or inside this loop. Advance the iter by the bytes
3941 * that were consumed.
3942 */
c88598a9
PB
3943 iov_iter_advance(&s->iter, ret);
3944 if (!iov_iter_count(&s->iter))
cd658695 3945 break;
b23df91b 3946 rw->bytes_done += ret;
c88598a9 3947 iov_iter_save_state(&s->iter, &s->iter_state);
cd658695 3948
b23df91b
PB
3949 /* if we can retry, do so with the callbacks armed */
3950 if (!io_rw_should_retry(req)) {
3951 kiocb->ki_flags &= ~IOCB_WAITQ;
3952 return -EAGAIN;
3953 }
3954
3955 /*
3956 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3957 * we get -EIOCBQUEUED, then we'll get a notification when the
3958 * desired page gets unlocked. We can also get a partial read
3959 * here, and if we do, then just retry at the new offset.
3960 */
c88598a9 3961 ret = io_iter_do_read(req, &s->iter);
b23df91b
PB
3962 if (ret == -EIOCBQUEUED)
3963 return 0;
227c0c96 3964 /* we got some bytes, but not all. retry. */
b5b0ecb7 3965 kiocb->ki_flags &= ~IOCB_WAITQ;
c88598a9 3966 iov_iter_restore(&s->iter, &s->iter_state);
cd658695 3967 } while (ret > 0);
227c0c96 3968done:
2ea537ca 3969 kiocb_done(req, ret, issue_flags);
fe1cdd55
PB
3970out_free:
3971	/* it's faster to check here than to delegate to kfree */
3972 if (iovec)
3973 kfree(iovec);
5ea5dd45 3974 return 0;
2b188cc1
JA
3975}
3976
889fca73 3977static int io_write(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1 3978{
607b6fb8 3979 struct io_rw_state __s, *s = &__s;
c88598a9 3980 struct iovec *iovec;
9adbd45d 3981 struct kiocb *kiocb = &req->rw.kiocb;
45d189c6 3982 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
cd658695 3983 ssize_t ret, ret2;
b4aec400 3984 loff_t *ppos;
2b188cc1 3985
607b6fb8 3986 if (!req_has_async_data(req)) {
5e49c973
PB
3987 ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
3988 if (unlikely(ret < 0))
2846c481 3989 return ret;
607b6fb8
PB
3990 } else {
3991 struct io_async_rw *rw = req->async_data;
3992
3993 s = &rw->s;
3994 iov_iter_restore(&s->iter, &s->iter_state);
2846c481 3995 iovec = NULL;
2846c481 3996 }
584b0180 3997 ret = io_rw_init_file(req, FMODE_WRITE);
323b190b
JA
3998 if (unlikely(ret)) {
3999 kfree(iovec);
584b0180 4000 return ret;
323b190b 4001 }
cef216fc 4002 req->cqe.res = iov_iter_count(&s->iter);
2b188cc1 4003
607b6fb8
PB
4004 if (force_nonblock) {
4005 /* If the file doesn't support async, just async punt */
35645ac3 4006 if (unlikely(!io_file_supports_nowait(req)))
607b6fb8 4007 goto copy_iov;
fd6c2e4c 4008
607b6fb8
PB
4009 /* file path doesn't support NOWAIT for non-direct_IO */
4010 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
4011 (req->flags & REQ_F_ISREG))
4012 goto copy_iov;
31b51510 4013
607b6fb8
PB
4014 kiocb->ki_flags |= IOCB_NOWAIT;
4015 } else {
4016 /* Ensure we clear previously set non-block flag */
4017 kiocb->ki_flags &= ~IOCB_NOWAIT;
4018 }
31b51510 4019
b4aec400 4020 ppos = io_kiocb_update_pos(req);
d34e1e5b 4021
cef216fc 4022 ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
fa15bafb
PB
4023 if (unlikely(ret))
4024 goto out_free;
4ed734b0 4025
fa15bafb
PB
4026 /*
4027 * Open-code file_start_write here to grab freeze protection,
4028 * which will be released by another thread in
4029 * io_complete_rw(). Fool lockdep by telling it the lock got
4030 * released so that it doesn't complain about the held lock when
4031 * we return to userspace.
4032 */
4033 if (req->flags & REQ_F_ISREG) {
8a3c84b6 4034 sb_start_write(file_inode(req->file)->i_sb);
fa15bafb
PB
4035 __sb_writers_release(file_inode(req->file)->i_sb,
4036 SB_FREEZE_WRITE);
4037 }
4038 kiocb->ki_flags |= IOCB_WRITE;
4ed734b0 4039
35645ac3 4040 if (likely(req->file->f_op->write_iter))
c88598a9 4041 ret2 = call_write_iter(req->file, kiocb, &s->iter);
2dd2111d 4042 else if (req->file->f_op->write)
c88598a9 4043 ret2 = loop_rw_iter(WRITE, req, &s->iter);
2dd2111d
GH
4044 else
4045 ret2 = -EINVAL;
4ed734b0 4046
6ad7f233
PB
4047 if (req->flags & REQ_F_REISSUE) {
4048 req->flags &= ~REQ_F_REISSUE;
230d50d4 4049 ret2 = -EAGAIN;
6ad7f233 4050 }
230d50d4 4051
fa15bafb
PB
4052 /*
4053 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
4054 * retry them without IOCB_NOWAIT.
4055 */
4056 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
4057 ret2 = -EAGAIN;
75c668cd
PB
4058 /* no retry on NONBLOCK nor RWF_NOWAIT */
4059 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
355afaeb 4060 goto done;
fa15bafb 4061 if (!force_nonblock || ret2 != -EAGAIN) {
eefdf30f 4062 /* IOPOLL retry should happen for io-wq threads */
b10841c9 4063 if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
eefdf30f 4064 goto copy_iov;
355afaeb 4065done:
2ea537ca 4066 kiocb_done(req, ret2, issue_flags);
fa15bafb 4067 } else {
f67676d1 4068copy_iov:
c88598a9
PB
4069 iov_iter_restore(&s->iter, &s->iter_state);
4070 ret = io_setup_async_rw(req, iovec, s, false);
6bf985dc 4071 return ret ?: -EAGAIN;
2b188cc1 4072 }
31b51510 4073out_free:
f261c168 4074 /* it's reportedly faster than delegating the null check to kfree() */
252917c3 4075 if (iovec)
6f2cc166 4076 kfree(iovec);
2b188cc1
JA
4077 return ret;
4078}
4079
80a261fd
JA
4080static int io_renameat_prep(struct io_kiocb *req,
4081 const struct io_uring_sqe *sqe)
4082{
4083 struct io_rename *ren = &req->rename;
4084 const char __user *oldf, *newf;
4085
ed7eb259
JA
4086 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4087 return -EINVAL;
26578cda 4088 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
ed7eb259 4089 return -EINVAL;
80a261fd
JA
4090 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4091 return -EBADF;
4092
4093 ren->old_dfd = READ_ONCE(sqe->fd);
4094 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
4095 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4096 ren->new_dfd = READ_ONCE(sqe->len);
4097 ren->flags = READ_ONCE(sqe->rename_flags);
4098
4099 ren->oldpath = getname(oldf);
4100 if (IS_ERR(ren->oldpath))
4101 return PTR_ERR(ren->oldpath);
4102
4103 ren->newpath = getname(newf);
4104 if (IS_ERR(ren->newpath)) {
4105 putname(ren->oldpath);
4106 return PTR_ERR(ren->newpath);
4107 }
4108
4109 req->flags |= REQ_F_NEED_CLEANUP;
4110 return 0;
4111}
4112
45d189c6 4113static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
80a261fd
JA
4114{
4115 struct io_rename *ren = &req->rename;
4116 int ret;
4117
45d189c6 4118 if (issue_flags & IO_URING_F_NONBLOCK)
80a261fd
JA
4119 return -EAGAIN;
4120
4121 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
4122 ren->newpath, ren->flags);
4123
4124 req->flags &= ~REQ_F_NEED_CLEANUP;
4125 if (ret < 0)
93d2bcd2 4126 req_set_fail(req);
80a261fd
JA
4127 io_req_complete(req, ret);
4128 return 0;
4129}
4130
14a1143b
JA
4131static int io_unlinkat_prep(struct io_kiocb *req,
4132 const struct io_uring_sqe *sqe)
4133{
4134 struct io_unlink *un = &req->unlink;
4135 const char __user *fname;
4136
22634bc5
JA
4137 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4138 return -EINVAL;
26578cda
PB
4139 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
4140 sqe->splice_fd_in)
22634bc5 4141 return -EINVAL;
14a1143b
JA
4142 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4143 return -EBADF;
4144
4145 un->dfd = READ_ONCE(sqe->fd);
4146
4147 un->flags = READ_ONCE(sqe->unlink_flags);
4148 if (un->flags & ~AT_REMOVEDIR)
4149 return -EINVAL;
4150
4151 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
4152 un->filename = getname(fname);
4153 if (IS_ERR(un->filename))
4154 return PTR_ERR(un->filename);
4155
4156 req->flags |= REQ_F_NEED_CLEANUP;
4157 return 0;
4158}
4159
45d189c6 4160static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
14a1143b
JA
4161{
4162 struct io_unlink *un = &req->unlink;
4163 int ret;
4164
45d189c6 4165 if (issue_flags & IO_URING_F_NONBLOCK)
14a1143b
JA
4166 return -EAGAIN;
4167
4168 if (un->flags & AT_REMOVEDIR)
4169 ret = do_rmdir(un->dfd, un->filename);
4170 else
4171 ret = do_unlinkat(un->dfd, un->filename);
4172
4173 req->flags &= ~REQ_F_NEED_CLEANUP;
4174 if (ret < 0)
93d2bcd2 4175 req_set_fail(req);
14a1143b
JA
4176 io_req_complete(req, ret);
4177 return 0;
4178}
4179
e34a02dc
DK
4180static int io_mkdirat_prep(struct io_kiocb *req,
4181 const struct io_uring_sqe *sqe)
4182{
4183 struct io_mkdir *mkd = &req->mkdir;
4184 const char __user *fname;
4185
4186 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4187 return -EINVAL;
4188 if (sqe->ioprio || sqe->off || sqe->rw_flags || sqe->buf_index ||
4189 sqe->splice_fd_in)
4190 return -EINVAL;
4191 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4192 return -EBADF;
4193
4194 mkd->dfd = READ_ONCE(sqe->fd);
4195 mkd->mode = READ_ONCE(sqe->len);
4196
4197 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
4198 mkd->filename = getname(fname);
4199 if (IS_ERR(mkd->filename))
4200 return PTR_ERR(mkd->filename);
4201
4202 req->flags |= REQ_F_NEED_CLEANUP;
4203 return 0;
4204}
4205
04f34081 4206static int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
e34a02dc
DK
4207{
4208 struct io_mkdir *mkd = &req->mkdir;
4209 int ret;
4210
4211 if (issue_flags & IO_URING_F_NONBLOCK)
4212 return -EAGAIN;
4213
4214 ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
4215
4216 req->flags &= ~REQ_F_NEED_CLEANUP;
4217 if (ret < 0)
4218 req_set_fail(req);
4219 io_req_complete(req, ret);
4220 return 0;
4221}
4222
7a8721f8
DK
4223static int io_symlinkat_prep(struct io_kiocb *req,
4224 const struct io_uring_sqe *sqe)
4225{
4226 struct io_symlink *sl = &req->symlink;
4227 const char __user *oldpath, *newpath;
4228
4229 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4230 return -EINVAL;
4231 if (sqe->ioprio || sqe->len || sqe->rw_flags || sqe->buf_index ||
4232 sqe->splice_fd_in)
4233 return -EINVAL;
4234 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4235 return -EBADF;
4236
4237 sl->new_dfd = READ_ONCE(sqe->fd);
4238 oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
4239 newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4240
4241 sl->oldpath = getname(oldpath);
4242 if (IS_ERR(sl->oldpath))
4243 return PTR_ERR(sl->oldpath);
4244
4245 sl->newpath = getname(newpath);
4246 if (IS_ERR(sl->newpath)) {
4247 putname(sl->oldpath);
4248 return PTR_ERR(sl->newpath);
4249 }
4250
4251 req->flags |= REQ_F_NEED_CLEANUP;
4252 return 0;
4253}
4254
04f34081 4255static int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
7a8721f8
DK
4256{
4257 struct io_symlink *sl = &req->symlink;
4258 int ret;
4259
4260 if (issue_flags & IO_URING_F_NONBLOCK)
4261 return -EAGAIN;
4262
4263 ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
4264
4265 req->flags &= ~REQ_F_NEED_CLEANUP;
4266 if (ret < 0)
4267 req_set_fail(req);
4268 io_req_complete(req, ret);
4269 return 0;
4270}
4271
cf30da90
DK
4272static int io_linkat_prep(struct io_kiocb *req,
4273 const struct io_uring_sqe *sqe)
4274{
4275 struct io_hardlink *lnk = &req->hardlink;
4276 const char __user *oldf, *newf;
4277
4278 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4279 return -EINVAL;
4280 if (sqe->ioprio || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
4281 return -EINVAL;
4282 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4283 return -EBADF;
4284
4285 lnk->old_dfd = READ_ONCE(sqe->fd);
4286 lnk->new_dfd = READ_ONCE(sqe->len);
4287 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
4288 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4289 lnk->flags = READ_ONCE(sqe->hardlink_flags);
4290
4291 lnk->oldpath = getname(oldf);
4292 if (IS_ERR(lnk->oldpath))
4293 return PTR_ERR(lnk->oldpath);
4294
4295 lnk->newpath = getname(newf);
4296 if (IS_ERR(lnk->newpath)) {
4297 putname(lnk->oldpath);
4298 return PTR_ERR(lnk->newpath);
4299 }
4300
4301 req->flags |= REQ_F_NEED_CLEANUP;
4302 return 0;
4303}
4304
04f34081 4305static int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
cf30da90
DK
4306{
4307 struct io_hardlink *lnk = &req->hardlink;
4308 int ret;
4309
4310 if (issue_flags & IO_URING_F_NONBLOCK)
4311 return -EAGAIN;
4312
4313 ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
4314 lnk->newpath, lnk->flags);
4315
4316 req->flags &= ~REQ_F_NEED_CLEANUP;
4317 if (ret < 0)
4318 req_set_fail(req);
4319 io_req_complete(req, ret);
4320 return 0;
4321}
4322
36f4fa68
JA
4323static int io_shutdown_prep(struct io_kiocb *req,
4324 const struct io_uring_sqe *sqe)
4325{
4326#if defined(CONFIG_NET)
4327 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4328 return -EINVAL;
26578cda
PB
4329 if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
4330 sqe->buf_index || sqe->splice_fd_in))
36f4fa68
JA
4331 return -EINVAL;
4332
4333 req->shutdown.how = READ_ONCE(sqe->len);
4334 return 0;
4335#else
4336 return -EOPNOTSUPP;
4337#endif
4338}
4339
45d189c6 4340static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
36f4fa68
JA
4341{
4342#if defined(CONFIG_NET)
4343 struct socket *sock;
4344 int ret;
4345
45d189c6 4346 if (issue_flags & IO_URING_F_NONBLOCK)
36f4fa68
JA
4347 return -EAGAIN;
4348
48aba79b 4349 sock = sock_from_file(req->file);
36f4fa68 4350 if (unlikely(!sock))
48aba79b 4351 return -ENOTSOCK;
36f4fa68
JA
4352
4353 ret = __sys_shutdown_sock(sock, req->shutdown.how);
a146468d 4354 if (ret < 0)
93d2bcd2 4355 req_set_fail(req);
36f4fa68
JA
4356 io_req_complete(req, ret);
4357 return 0;
4358#else
4359 return -EOPNOTSUPP;
4360#endif
4361}
4362
f2a8d5c7
PB
4363static int __io_splice_prep(struct io_kiocb *req,
4364 const struct io_uring_sqe *sqe)
7d67af2c 4365{
fe7e3257 4366 struct io_splice *sp = &req->splice;
7d67af2c 4367 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
7d67af2c 4368
3232dd02
PB
4369 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4370 return -EINVAL;
7d67af2c 4371
7d67af2c
PB
4372 sp->len = READ_ONCE(sqe->len);
4373 sp->flags = READ_ONCE(sqe->splice_flags);
7d67af2c
PB
4374 if (unlikely(sp->flags & ~valid_flags))
4375 return -EINVAL;
a3e4bc23 4376 sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
7d67af2c
PB
4377 return 0;
4378}
4379
f2a8d5c7
PB
4380static int io_tee_prep(struct io_kiocb *req,
4381 const struct io_uring_sqe *sqe)
4382{
4383 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
4384 return -EINVAL;
4385 return __io_splice_prep(req, sqe);
4386}
4387
45d189c6 4388static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
f2a8d5c7
PB
4389{
4390 struct io_splice *sp = &req->splice;
f2a8d5c7
PB
4391 struct file *out = sp->file_out;
4392 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
a3e4bc23 4393 struct file *in;
f2a8d5c7
PB
4394 long ret = 0;
4395
45d189c6 4396 if (issue_flags & IO_URING_F_NONBLOCK)
f2a8d5c7 4397 return -EAGAIN;
a3e4bc23 4398
5106dd6e 4399 if (sp->flags & SPLICE_F_FD_IN_FIXED)
e9419766 4400 in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
5106dd6e
JA
4401 else
4402 in = io_file_get_normal(req, sp->splice_fd_in);
a3e4bc23
JA
4403 if (!in) {
4404 ret = -EBADF;
4405 goto done;
4406 }
4407
f2a8d5c7
PB
4408 if (sp->len)
4409 ret = do_tee(in, out, sp->len, flags);
4410
e1d767f0
PB
4411 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
4412 io_put_file(in);
a3e4bc23 4413done:
f2a8d5c7 4414 if (ret != sp->len)
93d2bcd2 4415 req_set_fail(req);
e1e16097 4416 io_req_complete(req, ret);
f2a8d5c7
PB
4417 return 0;
4418}
4419
4420static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4421{
fe7e3257 4422 struct io_splice *sp = &req->splice;
f2a8d5c7
PB
4423
4424 sp->off_in = READ_ONCE(sqe->splice_off_in);
4425 sp->off_out = READ_ONCE(sqe->off);
4426 return __io_splice_prep(req, sqe);
4427}
4428
45d189c6 4429static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
7d67af2c
PB
4430{
4431 struct io_splice *sp = &req->splice;
7d67af2c
PB
4432 struct file *out = sp->file_out;
4433 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
4434 loff_t *poff_in, *poff_out;
a3e4bc23 4435 struct file *in;
c9687426 4436 long ret = 0;
7d67af2c 4437
45d189c6 4438 if (issue_flags & IO_URING_F_NONBLOCK)
2fb3e822 4439 return -EAGAIN;
7d67af2c 4440
5106dd6e 4441 if (sp->flags & SPLICE_F_FD_IN_FIXED)
e9419766 4442 in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
5106dd6e
JA
4443 else
4444 in = io_file_get_normal(req, sp->splice_fd_in);
a3e4bc23
JA
4445 if (!in) {
4446 ret = -EBADF;
4447 goto done;
4448 }
4449
7d67af2c
PB
4450 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
4451 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
c9687426 4452
948a7749 4453 if (sp->len)
c9687426 4454 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
7d67af2c 4455
e1d767f0
PB
4456 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
4457 io_put_file(in);
a3e4bc23 4458done:
7d67af2c 4459 if (ret != sp->len)
93d2bcd2 4460 req_set_fail(req);
e1e16097 4461 io_req_complete(req, ret);
7d67af2c
PB
4462 return 0;
4463}
4464
2b188cc1
JA
4465/*
4466 * IORING_OP_NOP just posts a completion event, nothing else.
4467 */
889fca73 4468static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1
JA
4469{
4470 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 4471
def596e9
JA
4472 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4473 return -EINVAL;
4474
889fca73 4475 __io_req_complete(req, issue_flags, 0, 0);
2b188cc1
JA
4476 return 0;
4477}
4478
4f57f06c
JA
4479static int io_msg_ring_prep(struct io_kiocb *req,
4480 const struct io_uring_sqe *sqe)
4481{
f3b6a41e
JA
4482 if (unlikely(sqe->addr || sqe->ioprio || sqe->rw_flags ||
4483 sqe->splice_fd_in || sqe->buf_index || sqe->personality))
4f57f06c
JA
4484 return -EINVAL;
4485
4f57f06c
JA
4486 req->msg.user_data = READ_ONCE(sqe->off);
4487 req->msg.len = READ_ONCE(sqe->len);
4488 return 0;
4489}
4490
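/*
 * IORING_OP_MSG_RING: post a CQE carrying the caller-supplied user_data
 * and len directly into the target ring referenced by req->file.
 */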
4491static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
4492{
4493 struct io_ring_ctx *target_ctx;
4494 struct io_msg *msg = &req->msg;
4f57f06c 4495 bool filled;
3f1d52ab 4496 int ret;
4f57f06c 4497
3f1d52ab
JA
4498 ret = -EBADFD;
4499 if (req->file->f_op != &io_uring_fops)
4500 goto done;
4f57f06c 4501
3f1d52ab 4502 ret = -EOVERFLOW;
4f57f06c
JA
4503 target_ctx = req->file->private_data;
4504
4505 spin_lock(&target_ctx->completion_lock);
7ef66d18 4506 filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, 0);
4f57f06c
JA
4507 io_commit_cqring(target_ctx);
4508 spin_unlock(&target_ctx->completion_lock);
4509
4510 if (filled) {
4511 io_cqring_ev_posted(target_ctx);
4512 ret = 0;
4513 }
4514
3f1d52ab 4515done:
9666d420
JA
4516 if (ret < 0)
4517 req_set_fail(req);
4f57f06c
JA
4518 __io_req_complete(req, issue_flags, ret, 0);
4519 return 0;
4520}
4521
1155c76a 4522static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
c992fe29 4523{
6b06314c 4524 struct io_ring_ctx *ctx = req->ctx;
c992fe29 4525
6b06314c 4526 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
def596e9 4527 return -EINVAL;
26578cda
PB
4528 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
4529 sqe->splice_fd_in))
c992fe29
CH
4530 return -EINVAL;
4531
8ed8d3c3
JA
4532 req->sync.flags = READ_ONCE(sqe->fsync_flags);
4533 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
4534 return -EINVAL;
4535
4536 req->sync.off = READ_ONCE(sqe->off);
4537 req->sync.len = READ_ONCE(sqe->len);
c992fe29
CH
4538 return 0;
4539}
4540
45d189c6 4541static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 4542{
8ed8d3c3 4543 loff_t end = req->sync.off + req->sync.len;
8ed8d3c3
JA
4544 int ret;
4545
ac45abc0 4546 /* fsync always requires a blocking context */
45d189c6 4547 if (issue_flags & IO_URING_F_NONBLOCK)
ac45abc0
PB
4548 return -EAGAIN;
4549
9adbd45d 4550 ret = vfs_fsync_range(req->file, req->sync.off,
8ed8d3c3
JA
4551 end > 0 ? end : LLONG_MAX,
4552 req->sync.flags & IORING_FSYNC_DATASYNC);
4553 if (ret < 0)
93d2bcd2 4554 req_set_fail(req);
e1e16097 4555 io_req_complete(req, ret);
c992fe29
CH
4556 return 0;
4557}
4558
d63d1b5e
JA
4559static int io_fallocate_prep(struct io_kiocb *req,
4560 const struct io_uring_sqe *sqe)
4561{
26578cda
PB
4562 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
4563 sqe->splice_fd_in)
d63d1b5e 4564 return -EINVAL;
3232dd02
PB
4565 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4566 return -EINVAL;
d63d1b5e
JA
4567
4568 req->sync.off = READ_ONCE(sqe->off);
4569 req->sync.len = READ_ONCE(sqe->addr);
4570 req->sync.mode = READ_ONCE(sqe->len);
4571 return 0;
4572}
4573
45d189c6 4574static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
5d17b4a4 4575{
ac45abc0
PB
4576 int ret;
4577
d63d1b5e 4578 /* fallocate always requiring blocking context */
45d189c6 4579 if (issue_flags & IO_URING_F_NONBLOCK)
5d17b4a4 4580 return -EAGAIN;
ac45abc0
PB
4581 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
4582 req->sync.len);
ac45abc0 4583 if (ret < 0)
93d2bcd2 4584 req_set_fail(req);
f63cf519
JA
4585 else
4586 fsnotify_modify(req->file);
e1e16097 4587 io_req_complete(req, ret);
5d17b4a4
JA
4588 return 0;
4589}
4590
ec65fea5 4591static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
b7bb4f7d 4592{
f8748881 4593 const char __user *fname;
15b71abe 4594 int ret;
b7bb4f7d 4595
d3fddf6d
PB
4596 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4597 return -EINVAL;
b9445598 4598 if (unlikely(sqe->ioprio || sqe->buf_index))
15b71abe 4599 return -EINVAL;
ec65fea5 4600 if (unlikely(req->flags & REQ_F_FIXED_FILE))
cf3040ca 4601 return -EBADF;
03b1230c 4602
ec65fea5
PB
4603 /* open.how should be already initialised */
4604 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
08a1d26e 4605 req->open.how.flags |= O_LARGEFILE;
3529d8c2 4606
25e72d10
PB
4607 req->open.dfd = READ_ONCE(sqe->fd);
4608 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
f8748881 4609 req->open.filename = getname(fname);
15b71abe
JA
4610 if (IS_ERR(req->open.filename)) {
4611 ret = PTR_ERR(req->open.filename);
4612 req->open.filename = NULL;
4613 return ret;
4614 }
b9445598
PB
4615
4616 req->open.file_slot = READ_ONCE(sqe->file_index);
4617 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC))
4618 return -EINVAL;
4619
4022e7af 4620 req->open.nofile = rlimit(RLIMIT_NOFILE);
8fef80bf 4621 req->flags |= REQ_F_NEED_CLEANUP;
15b71abe 4622 return 0;
03b1230c
JA
4623}
4624
ec65fea5
PB
4625static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4626{
d3fddf6d
PB
4627 u64 mode = READ_ONCE(sqe->len);
4628 u64 flags = READ_ONCE(sqe->open_flags);
ec65fea5 4629
ec65fea5
PB
4630 req->open.how = build_open_how(flags, mode);
4631 return __io_openat_prep(req, sqe);
4632}
4633
cebdb986 4634static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
aa1fa28f 4635{
cebdb986 4636 struct open_how __user *how;
cebdb986 4637 size_t len;
0fa03c62
JA
4638 int ret;
4639
cebdb986
JA
4640 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4641 len = READ_ONCE(sqe->len);
cebdb986
JA
4642 if (len < OPEN_HOW_SIZE_VER0)
4643 return -EINVAL;
3529d8c2 4644
cebdb986
JA
4645 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
4646 len);
4647 if (ret)
4648 return ret;
3529d8c2 4649
ec65fea5 4650 return __io_openat_prep(req, sqe);
cebdb986
JA
4651}
4652
45d189c6 4653static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
15b71abe
JA
4654{
4655 struct open_flags op;
15b71abe 4656 struct file *file;
b9445598
PB
4657 bool resolve_nonblock, nonblock_set;
4658 bool fixed = !!req->open.file_slot;
15b71abe
JA
4659 int ret;
4660
cebdb986 4661 ret = build_open_flags(&req->open.how, &op);
15b71abe
JA
4662 if (ret)
4663 goto err;
3a81fd02
JA
4664 nonblock_set = op.open_flag & O_NONBLOCK;
4665 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
45d189c6 4666 if (issue_flags & IO_URING_F_NONBLOCK) {
3a81fd02
JA
4667 /*
4668 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
4669	 * it'll always return -EAGAIN
4670 */
4671 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
4672 return -EAGAIN;
4673 op.lookup_flags |= LOOKUP_CACHED;
4674 op.open_flag |= O_NONBLOCK;
4675 }
15b71abe 4676
b9445598
PB
4677 if (!fixed) {
4678 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
4679 if (ret < 0)
4680 goto err;
4681 }
15b71abe
JA
4682
4683 file = do_filp_open(req->open.dfd, req->open.filename, &op);
12dcb58a 4684 if (IS_ERR(file)) {
944d1444 4685 /*
12dcb58a
PB
4686	 * We could hang on to this 'fd' on retrying, but it seems like a
4687	 * marginal gain for something that is now known to be a slower
4688 * path. So just put it, and we'll get a new one when we retry.
944d1444 4689 */
b9445598
PB
4690 if (!fixed)
4691 put_unused_fd(ret);
3a81fd02 4692
15b71abe 4693 ret = PTR_ERR(file);
12dcb58a
PB
4694 /* only retry if RESOLVE_CACHED wasn't already set by application */
4695 if (ret == -EAGAIN &&
4696 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
4697 return -EAGAIN;
4698 goto err;
15b71abe 4699 }
12dcb58a
PB
4700
4701 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
4702 file->f_flags &= ~O_NONBLOCK;
4703 fsnotify_open(file);
b9445598
PB
4704
4705 if (!fixed)
4706 fd_install(ret, file);
4707 else
4708 ret = io_install_fixed_file(req, file, issue_flags,
4709 req->open.file_slot - 1);
15b71abe
JA
4710err:
4711 putname(req->open.filename);
8fef80bf 4712 req->flags &= ~REQ_F_NEED_CLEANUP;
15b71abe 4713 if (ret < 0)
93d2bcd2 4714 req_set_fail(req);
0bdf3398 4715 __io_req_complete(req, issue_flags, ret, 0);
15b71abe
JA
4716 return 0;
4717}
4718
45d189c6 4719static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
cebdb986 4720{
e45cff58 4721 return io_openat2(req, issue_flags);
cebdb986
JA
4722}
4723
067524e9
JA
4724static int io_remove_buffers_prep(struct io_kiocb *req,
4725 const struct io_uring_sqe *sqe)
4726{
4727 struct io_provide_buf *p = &req->pbuf;
4728 u64 tmp;
4729
26578cda
PB
4730 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
4731 sqe->splice_fd_in)
067524e9
JA
4732 return -EINVAL;
4733
4734 tmp = READ_ONCE(sqe->fd);
4735 if (!tmp || tmp > USHRT_MAX)
4736 return -EINVAL;
4737
4738 memset(p, 0, sizeof(*p));
4739 p->nbufs = tmp;
4740 p->bgid = READ_ONCE(sqe->buf_group);
4741 return 0;
4742}
4743
dbc7d452
JA
4744static int __io_remove_buffers(struct io_ring_ctx *ctx,
4745 struct io_buffer_list *bl, unsigned nbufs)
067524e9
JA
4746{
4747 unsigned i = 0;
4748
4749 /* shouldn't happen */
4750 if (!nbufs)
4751 return 0;
4752
4753 /* the head kbuf is the list itself */
dbc7d452 4754 while (!list_empty(&bl->buf_list)) {
067524e9
JA
4755 struct io_buffer *nxt;
4756
dbc7d452 4757 nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
067524e9 4758 list_del(&nxt->list);
067524e9
JA
4759 if (++i == nbufs)
4760 return i;
1d0254e6 4761 cond_resched();
067524e9
JA
4762 }
4763 i++;
067524e9
JA
4764
4765 return i;
4766}
4767
889fca73 4768static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
067524e9
JA
4769{
4770 struct io_provide_buf *p = &req->pbuf;
4771 struct io_ring_ctx *ctx = req->ctx;
dbc7d452 4772 struct io_buffer_list *bl;
067524e9
JA
4773 int ret = 0;
4774
f8929630 4775 io_ring_submit_lock(ctx, issue_flags);
067524e9
JA
4776
4777 ret = -ENOENT;
dbc7d452
JA
4778 bl = io_buffer_get_list(ctx, p->bgid);
4779 if (bl)
4780 ret = __io_remove_buffers(ctx, bl, p->nbufs);
067524e9 4781 if (ret < 0)
93d2bcd2 4782 req_set_fail(req);
067524e9 4783
9fb8cb49
PB
4784 /* complete before unlock, IOPOLL may need the lock */
4785 __io_req_complete(req, issue_flags, ret, 0);
f8929630 4786 io_ring_submit_unlock(ctx, issue_flags);
067524e9
JA
4787 return 0;
4788}
4789
ddf0322d
JA
4790static int io_provide_buffers_prep(struct io_kiocb *req,
4791 const struct io_uring_sqe *sqe)
4792{
38134ada 4793 unsigned long size, tmp_check;
ddf0322d
JA
4794 struct io_provide_buf *p = &req->pbuf;
4795 u64 tmp;
4796
26578cda 4797 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
ddf0322d
JA
4798 return -EINVAL;
4799
4800 tmp = READ_ONCE(sqe->fd);
4801 if (!tmp || tmp > USHRT_MAX)
4802 return -E2BIG;
4803 p->nbufs = tmp;
4804 p->addr = READ_ONCE(sqe->addr);
4805 p->len = READ_ONCE(sqe->len);
4806
38134ada
PB
4807 if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
4808 &size))
4809 return -EOVERFLOW;
4810 if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
4811 return -EOVERFLOW;
4812
d81269fe
PB
4813 size = (unsigned long)p->len * p->nbufs;
4814 if (!access_ok(u64_to_user_ptr(p->addr), size))
ddf0322d
JA
4815 return -EFAULT;
4816
4817 p->bgid = READ_ONCE(sqe->buf_group);
4818 tmp = READ_ONCE(sqe->off);
4819 if (tmp > USHRT_MAX)
4820 return -E2BIG;
4821 p->bid = tmp;
4822 return 0;
4823}
4824
cc3cec83
JA
4825static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
4826{
4827 struct io_buffer *buf;
4828 struct page *page;
4829 int bufs_in_page;
4830
4831 /*
4832 * Completions that don't happen inline (eg not under uring_lock) will
4833 * add to ->io_buffers_comp. If we don't have any free buffers, check
4834 * the completion list and splice those entries first.
4835 */
4836 if (!list_empty_careful(&ctx->io_buffers_comp)) {
4837 spin_lock(&ctx->completion_lock);
4838 if (!list_empty(&ctx->io_buffers_comp)) {
4839 list_splice_init(&ctx->io_buffers_comp,
4840 &ctx->io_buffers_cache);
4841 spin_unlock(&ctx->completion_lock);
4842 return 0;
4843 }
4844 spin_unlock(&ctx->completion_lock);
4845 }
4846
4847 /*
4848 * No free buffers and no completion entries either. Allocate a new
4849 * page worth of buffer entries and add those to our freelist.
4850 */
4851 page = alloc_page(GFP_KERNEL_ACCOUNT);
4852 if (!page)
4853 return -ENOMEM;
4854
4855 list_add(&page->lru, &ctx->io_buffers_pages);
4856
4857 buf = page_address(page);
4858 bufs_in_page = PAGE_SIZE / sizeof(*buf);
4859 while (bufs_in_page) {
4860 list_add_tail(&buf->list, &ctx->io_buffers_cache);
4861 buf++;
4862 bufs_in_page--;
4863 }
4864
4865 return 0;
4866}
4867
4868static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
dbc7d452 4869 struct io_buffer_list *bl)
ddf0322d
JA
4870{
4871 struct io_buffer *buf;
4872 u64 addr = pbuf->addr;
4873 int i, bid = pbuf->bid;
4874
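	/* pull buffer structs from the ctx cache, refilling a page worth at a time */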
4875 for (i = 0; i < pbuf->nbufs; i++) {
cc3cec83
JA
4876 if (list_empty(&ctx->io_buffers_cache) &&
4877 io_refill_buffer_cache(ctx))
ddf0322d 4878 break;
cc3cec83
JA
4879 buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
4880 list);
dbc7d452 4881 list_move_tail(&buf->list, &bl->buf_list);
ddf0322d 4882 buf->addr = addr;
d1f82808 4883 buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
ddf0322d 4884 buf->bid = bid;
b1c62645 4885 buf->bgid = pbuf->bgid;
ddf0322d
JA
4886 addr += pbuf->len;
4887 bid++;
f240762f 4888 cond_resched();
ddf0322d
JA
4889 }
4890
dbc7d452 4891 return i ? 0 : -ENOMEM;
ddf0322d
JA
4892}
4893
889fca73 4894static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
ddf0322d
JA
4895{
4896 struct io_provide_buf *p = &req->pbuf;
4897 struct io_ring_ctx *ctx = req->ctx;
dbc7d452 4898 struct io_buffer_list *bl;
ddf0322d 4899 int ret = 0;
ddf0322d 4900
f8929630 4901 io_ring_submit_lock(ctx, issue_flags);
ddf0322d 4902
dbc7d452
JA
4903 bl = io_buffer_get_list(ctx, p->bgid);
4904 if (unlikely(!bl)) {
4905 bl = kmalloc(sizeof(*bl), GFP_KERNEL);
4906 if (!bl) {
4907 ret = -ENOMEM;
4908 goto err;
4909 }
4910 io_buffer_add_list(ctx, bl, p->bgid);
ddf0322d 4911 }
dbc7d452
JA
4912
4913 ret = io_add_buffers(ctx, p, bl);
4914err:
ddf0322d 4915 if (ret < 0)
93d2bcd2 4916 req_set_fail(req);
9fb8cb49
PB
4917 /* complete before unlock, IOPOLL may need the lock */
4918 __io_req_complete(req, issue_flags, ret, 0);
f8929630 4919 io_ring_submit_unlock(ctx, issue_flags);
ddf0322d 4920 return 0;
cebdb986
JA
4921}
4922
3e4827b0
JA
4923static int io_epoll_ctl_prep(struct io_kiocb *req,
4924 const struct io_uring_sqe *sqe)
4925{
4926#if defined(CONFIG_EPOLL)
26578cda 4927 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
3e4827b0 4928 return -EINVAL;
2d74d042 4929 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3232dd02 4930 return -EINVAL;
3e4827b0
JA
4931
4932 req->epoll.epfd = READ_ONCE(sqe->fd);
4933 req->epoll.op = READ_ONCE(sqe->len);
4934 req->epoll.fd = READ_ONCE(sqe->off);
4935
4936 if (ep_op_has_event(req->epoll.op)) {
4937 struct epoll_event __user *ev;
4938
4939 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4940 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4941 return -EFAULT;
4942 }
4943
4944 return 0;
4945#else
4946 return -EOPNOTSUPP;
4947#endif
4948}
4949
889fca73 4950static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
3e4827b0
JA
4951{
4952#if defined(CONFIG_EPOLL)
4953 struct io_epoll *ie = &req->epoll;
4954 int ret;
45d189c6 4955 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3e4827b0
JA
4956
4957 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4958 if (force_nonblock && ret == -EAGAIN)
4959 return -EAGAIN;
4960
4961 if (ret < 0)
93d2bcd2 4962 req_set_fail(req);
889fca73 4963 __io_req_complete(req, issue_flags, ret, 0);
3e4827b0
JA
4964 return 0;
4965#else
4966 return -EOPNOTSUPP;
4967#endif
4968}
4969
c1ca757b
JA
4970static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4971{
4972#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
26578cda 4973 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
c1ca757b 4974 return -EINVAL;
3232dd02
PB
4975 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4976 return -EINVAL;
c1ca757b
JA
4977
4978 req->madvise.addr = READ_ONCE(sqe->addr);
4979 req->madvise.len = READ_ONCE(sqe->len);
4980 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4981 return 0;
4982#else
4983 return -EOPNOTSUPP;
4984#endif
4985}
4986
45d189c6 4987static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
c1ca757b
JA
4988{
4989#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4990 struct io_madvise *ma = &req->madvise;
4991 int ret;
4992
45d189c6 4993 if (issue_flags & IO_URING_F_NONBLOCK)
c1ca757b
JA
4994 return -EAGAIN;
4995
0726b01e 4996 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
c1ca757b 4997 if (ret < 0)
93d2bcd2 4998 req_set_fail(req);
e1e16097 4999 io_req_complete(req, ret);
c1ca757b
JA
5000 return 0;
5001#else
5002 return -EOPNOTSUPP;
5003#endif
5004}
5005
4840e418
JA
5006static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5007{
26578cda 5008 if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
4840e418 5009 return -EINVAL;
3232dd02
PB
5010 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5011 return -EINVAL;
4840e418
JA
5012
5013 req->fadvise.offset = READ_ONCE(sqe->off);
5014 req->fadvise.len = READ_ONCE(sqe->len);
5015 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
5016 return 0;
5017}
5018
45d189c6 5019static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
4840e418
JA
5020{
5021 struct io_fadvise *fa = &req->fadvise;
5022 int ret;
5023
45d189c6 5024 if (issue_flags & IO_URING_F_NONBLOCK) {
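		/* only advice types that just tweak readahead state won't block */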
3e69426d
JA
5025 switch (fa->advice) {
5026 case POSIX_FADV_NORMAL:
5027 case POSIX_FADV_RANDOM:
5028 case POSIX_FADV_SEQUENTIAL:
5029 break;
5030 default:
5031 return -EAGAIN;
5032 }
5033 }
4840e418
JA
5034
5035 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
5036 if (ret < 0)
93d2bcd2 5037 req_set_fail(req);
0bdf3398 5038 __io_req_complete(req, issue_flags, ret, 0);
4840e418
JA
5039 return 0;
5040}
5041
eddc7ef5
JA
5042static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5043{
1b6fe6e0
SR
5044 const char __user *path;
5045
2d74d042 5046 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3232dd02 5047 return -EINVAL;
26578cda 5048 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
eddc7ef5 5049 return -EINVAL;
9c280f90 5050 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 5051 return -EBADF;
eddc7ef5 5052
1d9e1288
BM
5053 req->statx.dfd = READ_ONCE(sqe->fd);
5054 req->statx.mask = READ_ONCE(sqe->len);
1b6fe6e0 5055 path = u64_to_user_ptr(READ_ONCE(sqe->addr));
1d9e1288
BM
5056 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
5057 req->statx.flags = READ_ONCE(sqe->statx_flags);
eddc7ef5 5058
1b6fe6e0
SR
5059 req->statx.filename = getname_flags(path,
5060 getname_statx_lookup_flags(req->statx.flags),
5061 NULL);
5062
5063 if (IS_ERR(req->statx.filename)) {
5064 int ret = PTR_ERR(req->statx.filename);
5065
5066 req->statx.filename = NULL;
5067 return ret;
5068 }
5069
5070 req->flags |= REQ_F_NEED_CLEANUP;
eddc7ef5
JA
5071 return 0;
5072}
5073
45d189c6 5074static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
eddc7ef5 5075{
1d9e1288 5076 struct io_statx *ctx = &req->statx;
eddc7ef5
JA
5077 int ret;
5078
59d70013 5079 if (issue_flags & IO_URING_F_NONBLOCK)
eddc7ef5
JA
5080 return -EAGAIN;
5081
e62753e4
BM
5082 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
5083 ctx->buffer);
eddc7ef5 5084
eddc7ef5 5085 if (ret < 0)
93d2bcd2 5086 req_set_fail(req);
e1e16097 5087 io_req_complete(req, ret);
eddc7ef5
JA
5088 return 0;
5089}
5090
b5dba59e
JA
5091static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5092{
14587a46 5093 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3232dd02 5094 return -EINVAL;
b5dba59e 5095 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
7df778be 5096 sqe->rw_flags || sqe->buf_index)
b5dba59e 5097 return -EINVAL;
9c280f90 5098 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 5099 return -EBADF;
b5dba59e
JA
5100
5101 req->close.fd = READ_ONCE(sqe->fd);
7df778be
PB
5102 req->close.file_slot = READ_ONCE(sqe->file_index);
5103 if (req->close.file_slot && req->close.fd)
5104 return -EINVAL;
5105
b5dba59e 5106 return 0;
b5dba59e
JA
5107}
5108
889fca73 5109static int io_close(struct io_kiocb *req, unsigned int issue_flags)
b5dba59e 5110{
9eac1904 5111 struct files_struct *files = current->files;
3af73b28 5112 struct io_close *close = &req->close;
9eac1904 5113 struct fdtable *fdt;
a1fde923
PB
5114 struct file *file = NULL;
5115 int ret = -EBADF;
b5dba59e 5116
7df778be
PB
5117 if (req->close.file_slot) {
5118 ret = io_close_fixed(req, issue_flags);
5119 goto err;
5120 }
5121
9eac1904
JA
5122 spin_lock(&files->file_lock);
5123 fdt = files_fdtable(files);
5124 if (close->fd >= fdt->max_fds) {
5125 spin_unlock(&files->file_lock);
5126 goto err;
5127 }
5128 file = fdt->fd[close->fd];
a1fde923 5129 if (!file || file->f_op == &io_uring_fops) {
9eac1904
JA
5130 spin_unlock(&files->file_lock);
5131 file = NULL;
5132 goto err;
3af73b28 5133 }
b5dba59e
JA
5134
5135 /* if the file has a flush method, be safe and punt to async */
45d189c6 5136 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
9eac1904 5137 spin_unlock(&files->file_lock);
0bf0eefd 5138 return -EAGAIN;
a2100672 5139 }
b5dba59e 5140
9eac1904
JA
5141 ret = __close_fd_get_file(close->fd, &file);
5142 spin_unlock(&files->file_lock);
5143 if (ret < 0) {
5144 if (ret == -ENOENT)
5145 ret = -EBADF;
5146 goto err;
5147 }
5148
3af73b28 5149 /* No ->flush() or already async, safely close from here */
9eac1904
JA
5150 ret = filp_close(file, current->files);
5151err:
3af73b28 5152 if (ret < 0)
93d2bcd2 5153 req_set_fail(req);
9eac1904
JA
5154 if (file)
5155 fput(file);
889fca73 5156 __io_req_complete(req, issue_flags, ret, 0);
1a417f4e 5157 return 0;
b5dba59e
JA
5158}
5159
1155c76a 5160static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5d17b4a4
JA
5161{
5162 struct io_ring_ctx *ctx = req->ctx;
5d17b4a4 5163
5d17b4a4
JA
5164 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
5165 return -EINVAL;
26578cda
PB
5166 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
5167 sqe->splice_fd_in))
5d17b4a4
JA
5168 return -EINVAL;
5169
8ed8d3c3
JA
5170 req->sync.off = READ_ONCE(sqe->off);
5171 req->sync.len = READ_ONCE(sqe->len);
5172 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
8ed8d3c3
JA
5173 return 0;
5174}
5175
45d189c6 5176static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 5177{
8ed8d3c3
JA
5178 int ret;
5179
ac45abc0 5180 /* sync_file_range always requires a blocking context */
45d189c6 5181 if (issue_flags & IO_URING_F_NONBLOCK)
ac45abc0
PB
5182 return -EAGAIN;
5183
9adbd45d 5184 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
8ed8d3c3
JA
5185 req->sync.flags);
5186 if (ret < 0)
93d2bcd2 5187 req_set_fail(req);
e1e16097 5188 io_req_complete(req, ret);
5d17b4a4
JA
5189 return 0;
5190}
5191
469956e8 5192#if defined(CONFIG_NET)
02d27d89
PB
5193static int io_setup_async_msg(struct io_kiocb *req,
5194 struct io_async_msghdr *kmsg)
5195{
e8c2bc1f
JA
5196 struct io_async_msghdr *async_msg = req->async_data;
5197
5198 if (async_msg)
02d27d89 5199 return -EAGAIN;
e8c2bc1f 5200 if (io_alloc_async_data(req)) {
257e84a5 5201 kfree(kmsg->free_iov);
02d27d89
PB
5202 return -ENOMEM;
5203 }
e8c2bc1f 5204 async_msg = req->async_data;
02d27d89 5205 req->flags |= REQ_F_NEED_CLEANUP;
e8c2bc1f 5206 memcpy(async_msg, kmsg, sizeof(*kmsg));
2a780802 5207 async_msg->msg.msg_name = &async_msg->addr;
257e84a5
PB
5208	/* if we're using fast_iov, set it to the new one */
5209 if (!async_msg->free_iov)
5210 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
5211
02d27d89
PB
5212 return -EAGAIN;
5213}
5214
2ae523ed
PB
5215static int io_sendmsg_copy_hdr(struct io_kiocb *req,
5216 struct io_async_msghdr *iomsg)
5217{
2ae523ed 5218 iomsg->msg.msg_name = &iomsg->addr;
257e84a5 5219 iomsg->free_iov = iomsg->fast_iov;
2ae523ed 5220 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
257e84a5 5221 req->sr_msg.msg_flags, &iomsg->free_iov);
2ae523ed
PB
5222}
5223
93642ef8
PB
5224static int io_sendmsg_prep_async(struct io_kiocb *req)
5225{
5226 int ret;
5227
93642ef8
PB
5228 ret = io_sendmsg_copy_hdr(req, req->async_data);
5229 if (!ret)
5230 req->flags |= REQ_F_NEED_CLEANUP;
5231 return ret;
5232}
5233
3529d8c2 5234static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
03b1230c 5235{
e47293fd 5236 struct io_sr_msg *sr = &req->sr_msg;
03b1230c 5237
d2b6f48b
PB
5238 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5239 return -EINVAL;
5240
270a5940 5241 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
fddaface 5242 sr->len = READ_ONCE(sqe->len);
04411806
PB
5243 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
5244 if (sr->msg_flags & MSG_DONTWAIT)
5245 req->flags |= REQ_F_NOWAIT;
3529d8c2 5246
d8768362
JA
5247#ifdef CONFIG_COMPAT
5248 if (req->ctx->compat)
5249 sr->msg_flags |= MSG_CMSG_COMPAT;
5250#endif
93642ef8 5251 return 0;
03b1230c
JA
5252}
5253
889fca73 5254static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
aa1fa28f 5255{
6b754c8b 5256 struct io_async_msghdr iomsg, *kmsg;
0fa03c62 5257 struct socket *sock;
7a7cacba 5258 unsigned flags;
0031275d 5259 int min_ret = 0;
0fa03c62
JA
5260 int ret;
5261
dba4a925 5262 sock = sock_from_file(req->file);
7a7cacba 5263 if (unlikely(!sock))
dba4a925 5264 return -ENOTSOCK;
3529d8c2 5265
d886e185
PB
5266 if (req_has_async_data(req)) {
5267 kmsg = req->async_data;
5268 } else {
7a7cacba
PB
5269 ret = io_sendmsg_copy_hdr(req, &iomsg);
5270 if (ret)
5271 return ret;
5272 kmsg = &iomsg;
0fa03c62 5273 }
0fa03c62 5274
04411806
PB
5275 flags = req->sr_msg.msg_flags;
5276 if (issue_flags & IO_URING_F_NONBLOCK)
7a7cacba 5277 flags |= MSG_DONTWAIT;
0031275d
SM
5278 if (flags & MSG_WAITALL)
5279 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
5280
7a7cacba 5281 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
0fa03c62 5282
7297ce3d
PB
5283 if (ret < min_ret) {
5284 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
5285 return io_setup_async_msg(req, kmsg);
5286 if (ret == -ERESTARTSYS)
5287 ret = -EINTR;
5288 req_set_fail(req);
5289 }
257e84a5
PB
5290 /* fast path, check for non-NULL to avoid function call */
5291 if (kmsg->free_iov)
5292 kfree(kmsg->free_iov);
99bc4c38 5293 req->flags &= ~REQ_F_NEED_CLEANUP;
889fca73 5294 __io_req_complete(req, issue_flags, ret, 0);
5d17b4a4 5295 return 0;
03b1230c 5296}
aa1fa28f 5297
889fca73 5298static int io_send(struct io_kiocb *req, unsigned int issue_flags)
fddaface 5299{
7a7cacba
PB
5300 struct io_sr_msg *sr = &req->sr_msg;
5301 struct msghdr msg;
5302 struct iovec iov;
fddaface 5303 struct socket *sock;
7a7cacba 5304 unsigned flags;
0031275d 5305 int min_ret = 0;
fddaface
JA
5306 int ret;
5307
dba4a925 5308 sock = sock_from_file(req->file);
7a7cacba 5309 if (unlikely(!sock))
dba4a925 5310 return -ENOTSOCK;
fddaface 5311
7a7cacba
PB
5312 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
5313 if (unlikely(ret))
14db8411 5314 return ret;
fddaface 5315
7a7cacba
PB
5316 msg.msg_name = NULL;
5317 msg.msg_control = NULL;
5318 msg.msg_controllen = 0;
5319 msg.msg_namelen = 0;
fddaface 5320
04411806
PB
5321 flags = req->sr_msg.msg_flags;
5322 if (issue_flags & IO_URING_F_NONBLOCK)
7a7cacba 5323 flags |= MSG_DONTWAIT;
0031275d
SM
5324 if (flags & MSG_WAITALL)
5325 min_ret = iov_iter_count(&msg.msg_iter);
5326
7a7cacba
PB
5327 msg.msg_flags = flags;
5328 ret = sock_sendmsg(sock, &msg);
7297ce3d
PB
5329 if (ret < min_ret) {
5330 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
5331 return -EAGAIN;
5332 if (ret == -ERESTARTSYS)
5333 ret = -EINTR;
93d2bcd2 5334 req_set_fail(req);
7297ce3d 5335 }
889fca73 5336 __io_req_complete(req, issue_flags, ret, 0);
fddaface 5337 return 0;
fddaface
JA
5338}
5339
1400e697
PB
5340static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
5341 struct io_async_msghdr *iomsg)
52de1fe1
JA
5342{
5343 struct io_sr_msg *sr = &req->sr_msg;
5344 struct iovec __user *uiov;
5345 size_t iov_len;
5346 int ret;
5347
1400e697
PB
5348 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
5349 &iomsg->uaddr, &uiov, &iov_len);
52de1fe1
JA
5350 if (ret)
5351 return ret;
5352
5353 if (req->flags & REQ_F_BUFFER_SELECT) {
5354 if (iov_len > 1)
5355 return -EINVAL;
5476dfed 5356 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
52de1fe1 5357 return -EFAULT;
5476dfed 5358 sr->len = iomsg->fast_iov[0].iov_len;
257e84a5 5359 iomsg->free_iov = NULL;
52de1fe1 5360 } else {
257e84a5 5361 iomsg->free_iov = iomsg->fast_iov;
89cd35c5 5362 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
257e84a5 5363 &iomsg->free_iov, &iomsg->msg.msg_iter,
89cd35c5 5364 false);
52de1fe1
JA
5365 if (ret > 0)
5366 ret = 0;
5367 }
5368
5369 return ret;
5370}
5371
5372#ifdef CONFIG_COMPAT
5373static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
1400e697 5374 struct io_async_msghdr *iomsg)
52de1fe1 5375{
52de1fe1
JA
5376 struct io_sr_msg *sr = &req->sr_msg;
5377 struct compat_iovec __user *uiov;
5378 compat_uptr_t ptr;
5379 compat_size_t len;
5380 int ret;
5381
4af3417a
PB
5382 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
5383 &ptr, &len);
52de1fe1
JA
5384 if (ret)
5385 return ret;
5386
5387 uiov = compat_ptr(ptr);
5388 if (req->flags & REQ_F_BUFFER_SELECT) {
5389 compat_ssize_t clen;
5390
5391 if (len > 1)
5392 return -EINVAL;
5393 if (!access_ok(uiov, sizeof(*uiov)))
5394 return -EFAULT;
5395 if (__get_user(clen, &uiov->iov_len))
5396 return -EFAULT;
5397 if (clen < 0)
5398 return -EINVAL;
2d280bc8 5399 sr->len = clen;
257e84a5 5400 iomsg->free_iov = NULL;
52de1fe1 5401 } else {
257e84a5 5402 iomsg->free_iov = iomsg->fast_iov;
89cd35c5 5403 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
257e84a5 5404 UIO_FASTIOV, &iomsg->free_iov,
89cd35c5 5405 &iomsg->msg.msg_iter, true);
52de1fe1
JA
5406 if (ret < 0)
5407 return ret;
5408 }
5409
5410 return 0;
5411}
5412#endif
5413
1400e697
PB
5414static int io_recvmsg_copy_hdr(struct io_kiocb *req,
5415 struct io_async_msghdr *iomsg)
52de1fe1 5416{
1400e697 5417 iomsg->msg.msg_name = &iomsg->addr;
52de1fe1
JA
5418
5419#ifdef CONFIG_COMPAT
5420 if (req->ctx->compat)
1400e697 5421 return __io_compat_recvmsg_copy_hdr(req, iomsg);
fddaface 5422#endif
52de1fe1 5423
1400e697 5424 return __io_recvmsg_copy_hdr(req, iomsg);
52de1fe1
JA
5425}
5426
bcda7baa 5427static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
51aac424 5428 unsigned int issue_flags)
bcda7baa
JA
5429{
5430 struct io_sr_msg *sr = &req->sr_msg;
bcda7baa 5431
51aac424 5432 return io_buffer_select(req, &sr->len, sr->bgid, issue_flags);
fddaface
JA
5433}
5434
93642ef8 5435static int io_recvmsg_prep_async(struct io_kiocb *req)
aa1fa28f 5436{
99bc4c38 5437 int ret;
3529d8c2 5438
93642ef8
PB
5439 ret = io_recvmsg_copy_hdr(req, req->async_data);
5440 if (!ret)
5441 req->flags |= REQ_F_NEED_CLEANUP;
5442 return ret;
5443}
5444
5445static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5446{
5447 struct io_sr_msg *sr = &req->sr_msg;
5448
d2b6f48b
PB
5449 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5450 return -EINVAL;
5451
270a5940 5452 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
0b7b21e4 5453 sr->len = READ_ONCE(sqe->len);
bcda7baa 5454 sr->bgid = READ_ONCE(sqe->buf_group);
04411806
PB
5455 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
5456 if (sr->msg_flags & MSG_DONTWAIT)
5457 req->flags |= REQ_F_NOWAIT;
06b76d44 5458
d8768362
JA
5459#ifdef CONFIG_COMPAT
5460 if (req->ctx->compat)
5461 sr->msg_flags |= MSG_CMSG_COMPAT;
5462#endif
7ba89d2a 5463 sr->done_io = 0;
93642ef8 5464 return 0;
aa1fa28f
JA
5465}
5466
7ba89d2a
JA
5467static bool io_net_retry(struct socket *sock, int flags)
5468{
5469 if (!(flags & MSG_WAITALL))
5470 return false;
5471 return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
5472}
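/*
 * Editorial sketch, not part of this file: io_net_retry() above only allows
 * a retry for MSG_WAITALL on stream/seqpacket sockets, where a short receive
 * can simply be resumed and the byte counts accumulated (sr->done_io in the
 * callers below). A plain user-space analogue of that "resume until full"
 * loop, assuming a connected stream socket; recv_all is a made-up name. The
 * block is fenced with #if 0 so it is never compiled here.
 */
#if 0
#include <sys/socket.h>
#include <errno.h>
#include <stddef.h>

static ssize_t recv_all(int fd, void *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t ret = recv(fd, (char *)buf + done, len - done, 0);

		if (ret < 0) {
			if (errno == EINTR)
				continue;
			/* report partial progress if some data already arrived */
			return done ? (ssize_t)done : -1;
		}
		if (ret == 0)
			break;	/* peer closed the connection */
		done += ret;
	}
	return done;
}
#endif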
5473
889fca73 5474static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
aa1fa28f 5475{
6b754c8b 5476 struct io_async_msghdr iomsg, *kmsg;
7ba89d2a 5477 struct io_sr_msg *sr = &req->sr_msg;
03b1230c 5478 struct socket *sock;
7fbb1b54 5479 struct io_buffer *kbuf;
7a7cacba 5480 unsigned flags;
d1fd1c20 5481 int ret, min_ret = 0;
45d189c6 5482 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
03b1230c 5483
dba4a925 5484 sock = sock_from_file(req->file);
7a7cacba 5485 if (unlikely(!sock))
dba4a925 5486 return -ENOTSOCK;
3529d8c2 5487
d886e185
PB
5488 if (req_has_async_data(req)) {
5489 kmsg = req->async_data;
5490 } else {
7a7cacba
PB
5491 ret = io_recvmsg_copy_hdr(req, &iomsg);
5492 if (ret)
681fda8d 5493 return ret;
7a7cacba
PB
5494 kmsg = &iomsg;
5495 }
03b1230c 5496
bc02ef33 5497 if (req->flags & REQ_F_BUFFER_SELECT) {
51aac424 5498 kbuf = io_recv_buffer_select(req, issue_flags);
bc02ef33 5499 if (IS_ERR(kbuf))
52de1fe1 5500 return PTR_ERR(kbuf);
7a7cacba 5501 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
5476dfed
PB
5502 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
5503 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
7a7cacba
PB
5504 1, req->sr_msg.len);
5505 }
52de1fe1 5506
04411806
PB
5507 flags = req->sr_msg.msg_flags;
5508 if (force_nonblock)
7a7cacba 5509 flags |= MSG_DONTWAIT;
0031275d
SM
5510 if (flags & MSG_WAITALL)
5511 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
5512
7a7cacba
PB
5513 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
5514 kmsg->uaddr, flags);
7297ce3d
PB
5515 if (ret < min_ret) {
5516 if (ret == -EAGAIN && force_nonblock)
5517 return io_setup_async_msg(req, kmsg);
5518 if (ret == -ERESTARTSYS)
5519 ret = -EINTR;
7ba89d2a
JA
5520 if (ret > 0 && io_net_retry(sock, flags)) {
5521 sr->done_io += ret;
8a3e8ee5 5522 req->flags |= REQ_F_PARTIAL_IO;
7ba89d2a
JA
5523 return io_setup_async_msg(req, kmsg);
5524 }
7297ce3d
PB
5525 req_set_fail(req);
5526 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
5527 req_set_fail(req);
5528 }
03b1230c 5529
257e84a5
PB
5530 /* fast path, check for non-NULL to avoid function call */
5531 if (kmsg->free_iov)
5532 kfree(kmsg->free_iov);
99bc4c38 5533 req->flags &= ~REQ_F_NEED_CLEANUP;
7ba89d2a
JA
5534 if (ret >= 0)
5535 ret += sr->done_io;
5536 else if (sr->done_io)
5537 ret = sr->done_io;
cc3cec83 5538 __io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags));
03b1230c 5539 return 0;
0fa03c62 5540}
5d17b4a4 5541
889fca73 5542static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
fddaface 5543{
6b754c8b 5544 struct io_buffer *kbuf;
7a7cacba
PB
5545 struct io_sr_msg *sr = &req->sr_msg;
5546 struct msghdr msg;
5547 void __user *buf = sr->buf;
fddaface 5548 struct socket *sock;
7a7cacba
PB
5549 struct iovec iov;
5550 unsigned flags;
d1fd1c20 5551 int ret, min_ret = 0;
45d189c6 5552 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
fddaface 5553
dba4a925 5554 sock = sock_from_file(req->file);
7a7cacba 5555 if (unlikely(!sock))
dba4a925 5556 return -ENOTSOCK;
fddaface 5557
bc02ef33 5558 if (req->flags & REQ_F_BUFFER_SELECT) {
51aac424 5559 kbuf = io_recv_buffer_select(req, issue_flags);
bcda7baa
JA
5560 if (IS_ERR(kbuf))
5561 return PTR_ERR(kbuf);
7a7cacba 5562 buf = u64_to_user_ptr(kbuf->addr);
bc02ef33 5563 }
bcda7baa 5564
7a7cacba 5565 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
14c32eee
PB
5566 if (unlikely(ret))
5567 goto out_free;
fddaface 5568
7a7cacba
PB
5569 msg.msg_name = NULL;
5570 msg.msg_control = NULL;
5571 msg.msg_controllen = 0;
5572 msg.msg_namelen = 0;
5573 msg.msg_iocb = NULL;
5574 msg.msg_flags = 0;
fddaface 5575
04411806
PB
5576 flags = req->sr_msg.msg_flags;
5577 if (force_nonblock)
7a7cacba 5578 flags |= MSG_DONTWAIT;
0031275d
SM
5579 if (flags & MSG_WAITALL)
5580 min_ret = iov_iter_count(&msg.msg_iter);
5581
7a7cacba 5582 ret = sock_recvmsg(sock, &msg, flags);
7297ce3d
PB
5583 if (ret < min_ret) {
5584 if (ret == -EAGAIN && force_nonblock)
5585 return -EAGAIN;
5586 if (ret == -ERESTARTSYS)
5587 ret = -EINTR;
7ba89d2a
JA
5588 if (ret > 0 && io_net_retry(sock, flags)) {
5589 sr->len -= ret;
5590 sr->buf += ret;
5591 sr->done_io += ret;
8a3e8ee5 5592 req->flags |= REQ_F_PARTIAL_IO;
7ba89d2a
JA
5593 return -EAGAIN;
5594 }
7297ce3d
PB
5595 req_set_fail(req);
5596 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
0d7c1153 5597out_free:
93d2bcd2 5598 req_set_fail(req);
7297ce3d 5599 }
cc3cec83 5600
7ba89d2a
JA
5601 if (ret >= 0)
5602 ret += sr->done_io;
5603 else if (sr->done_io)
5604 ret = sr->done_io;
cc3cec83 5605 __io_req_complete(req, issue_flags, ret, io_put_kbuf(req, issue_flags));
fddaface 5606 return 0;
fddaface
JA
5607}
5608
3529d8c2 5609static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
17f2fe35 5610{
8ed8d3c3
JA
5611 struct io_accept *accept = &req->accept;
5612
14587a46 5613 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
17f2fe35 5614 return -EINVAL;
aaa4db12 5615 if (sqe->ioprio || sqe->len || sqe->buf_index)
17f2fe35
JA
5616 return -EINVAL;
5617
d55e5f5b
JA
5618 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5619 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
8ed8d3c3 5620 accept->flags = READ_ONCE(sqe->accept_flags);
09952e3e 5621 accept->nofile = rlimit(RLIMIT_NOFILE);
a7083ad5 5622
aaa4db12 5623 accept->file_slot = READ_ONCE(sqe->file_index);
adf3a9e9 5624 if (accept->file_slot && (accept->flags & SOCK_CLOEXEC))
aaa4db12 5625 return -EINVAL;
a7083ad5
PB
5626 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
5627 return -EINVAL;
5628 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
5629 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
8ed8d3c3 5630 return 0;
8ed8d3c3 5631}
17f2fe35 5632
889fca73 5633static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3
JA
5634{
5635 struct io_accept *accept = &req->accept;
45d189c6 5636 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ac45abc0 5637 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
aaa4db12 5638 bool fixed = !!accept->file_slot;
a7083ad5
PB
5639 struct file *file;
5640 int ret, fd;
8ed8d3c3 5641
aaa4db12
PB
5642 if (!fixed) {
5643 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
5644 if (unlikely(fd < 0))
5645 return fd;
5646 }
a7083ad5
PB
5647 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
5648 accept->flags);
5649 if (IS_ERR(file)) {
aaa4db12
PB
5650 if (!fixed)
5651 put_unused_fd(fd);
a7083ad5
PB
5652 ret = PTR_ERR(file);
5653 if (ret == -EAGAIN && force_nonblock)
5654 return -EAGAIN;
ac45abc0
PB
5655 if (ret == -ERESTARTSYS)
5656 ret = -EINTR;
93d2bcd2 5657 req_set_fail(req);
aaa4db12 5658 } else if (!fixed) {
a7083ad5
PB
5659 fd_install(fd, file);
5660 ret = fd;
aaa4db12
PB
5661 } else {
5662 ret = io_install_fixed_file(req, file, issue_flags,
5663 accept->file_slot - 1);
ac45abc0 5664 }
889fca73 5665 __io_req_complete(req, issue_flags, ret, 0);
17f2fe35 5666 return 0;
8ed8d3c3
JA
5667}
5668
93642ef8
PB
5669static int io_connect_prep_async(struct io_kiocb *req)
5670{
5671 struct io_async_connect *io = req->async_data;
5672 struct io_connect *conn = &req->connect;
5673
5674 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
5675}
5676
3529d8c2 5677static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f499a021 5678{
3529d8c2 5679 struct io_connect *conn = &req->connect;
f499a021 5680
14587a46 5681 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3fbb51c1 5682 return -EINVAL;
26578cda
PB
5683 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
5684 sqe->splice_fd_in)
3fbb51c1
JA
5685 return -EINVAL;
5686
3529d8c2
JA
5687 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5688 conn->addr_len = READ_ONCE(sqe->addr2);
93642ef8 5689 return 0;
f499a021
JA
5690}
5691
889fca73 5692static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
f8e85cf2 5693{
e8c2bc1f 5694 struct io_async_connect __io, *io;
f8e85cf2 5695 unsigned file_flags;
3fbb51c1 5696 int ret;
45d189c6 5697 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
f8e85cf2 5698
d886e185 5699 if (req_has_async_data(req)) {
e8c2bc1f 5700 io = req->async_data;
f499a021 5701 } else {
3529d8c2
JA
5702 ret = move_addr_to_kernel(req->connect.addr,
5703 req->connect.addr_len,
e8c2bc1f 5704 &__io.address);
f499a021
JA
5705 if (ret)
5706 goto out;
5707 io = &__io;
5708 }
5709
3fbb51c1
JA
5710 file_flags = force_nonblock ? O_NONBLOCK : 0;
5711
e8c2bc1f 5712 ret = __sys_connect_file(req->file, &io->address,
3fbb51c1 5713 req->connect.addr_len, file_flags);
87f80d62 5714 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
d886e185 5715 if (req_has_async_data(req))
b7bb4f7d 5716 return -EAGAIN;
e8c2bc1f 5717 if (io_alloc_async_data(req)) {
f499a021
JA
5718 ret = -ENOMEM;
5719 goto out;
5720 }
e8c2bc1f 5721 memcpy(req->async_data, &__io, sizeof(__io));
f8e85cf2 5722 return -EAGAIN;
f499a021 5723 }
f8e85cf2
JA
5724 if (ret == -ERESTARTSYS)
5725 ret = -EINTR;
f499a021 5726out:
4e88d6e7 5727 if (ret < 0)
93d2bcd2 5728 req_set_fail(req);
889fca73 5729 __io_req_complete(req, issue_flags, ret, 0);
f8e85cf2 5730 return 0;
469956e8
Y
5731}
5732#else /* !CONFIG_NET */
99a10081
JA
5733#define IO_NETOP_FN(op) \
5734static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
5735{ \
5736 return -EOPNOTSUPP; \
5737}
5738
5739#define IO_NETOP_PREP(op) \
5740IO_NETOP_FN(op) \
5741static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5742{ \
5743 return -EOPNOTSUPP; \
5744} \
5745
5746#define IO_NETOP_PREP_ASYNC(op) \
5747IO_NETOP_PREP(op) \
5748static int io_##op##_prep_async(struct io_kiocb *req) \
5749{ \
5750 return -EOPNOTSUPP; \
5751}
5752
5753IO_NETOP_PREP_ASYNC(sendmsg);
5754IO_NETOP_PREP_ASYNC(recvmsg);
5755IO_NETOP_PREP_ASYNC(connect);
5756IO_NETOP_PREP(accept);
5757IO_NETOP_FN(send);
5758IO_NETOP_FN(recv);
469956e8 5759#endif /* CONFIG_NET */
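/*
 * Editorial note, not part of this file: with CONFIG_NET unset, each stub is
 * generated by the macros above. For readability, IO_NETOP_PREP(accept)
 * expands to exactly the two functions below (shown under #if 0 so they are
 * not compiled a second time).
 */
#if 0
static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}

static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return -EOPNOTSUPP;
}
#endif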
f8e85cf2 5760
d7718a9d
JA
5761struct io_poll_table {
5762 struct poll_table_struct pt;
5763 struct io_kiocb *req;
68b11e8b 5764 int nr_entries;
d7718a9d
JA
5765 int error;
5766};
ce593a6c 5767
aa43477b 5768#define IO_POLL_CANCEL_FLAG BIT(31)
e2c0cb7c 5769#define IO_POLL_REF_MASK GENMASK(30, 0)
6d816e08 5770
aa43477b
PB
5771/*
5772 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
5773 * free and we can bump it to acquire ownership. Modifying a request while not
5774 * owning it is disallowed; that prevents races when enqueueing task_work and
5775 * between arming poll and wakeups.
5776 */
5777static inline bool io_poll_get_ownership(struct io_kiocb *req)
5778{
5779 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
d7718a9d
JA
5780}
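/*
 * Editorial sketch, not part of this file: a minimal user-space analogue of
 * the ownership scheme above, using C11 atomics and ignoring the cancel bit.
 * Whoever bumps the counter from zero becomes the owner and runs the work;
 * everyone else only bumps it, and the owner re-runs if extra references
 * arrived while it was working (compare the loop in io_poll_check_events()
 * below). The names poll_like/get_ownership/run_or_defer are made up.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

struct poll_like {
	atomic_int refs;
};

static bool get_ownership(struct poll_like *p)
{
	/* true only for the caller that moved refs from 0 to 1 */
	return atomic_fetch_add(&p->refs, 1) == 0;
}

static void run_or_defer(struct poll_like *p, void (*work_fn)(struct poll_like *))
{
	int v;

	if (!get_ownership(p))
		return;		/* the current owner will notice our reference */
	do {
		v = atomic_load(&p->refs);
		work_fn(p);
		/* drop the refs we saw; go again if new ones showed up meanwhile */
	} while (atomic_fetch_sub(&p->refs, v) != v);
}
#endif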
5781
aa43477b 5782static void io_poll_mark_cancelled(struct io_kiocb *req)
74ce6ce4 5783{
aa43477b 5784 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
74ce6ce4
JA
5785}
5786
d4e7cd36 5787static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
18bceab1 5788{
e8c2bc1f 5789 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
d4e7cd36 5790 if (req->opcode == IORING_OP_POLL_ADD)
e8c2bc1f 5791 return req->async_data;
d4e7cd36
JA
5792 return req->apoll->double_poll;
5793}
5794
5795static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
5796{
5797 if (req->opcode == IORING_OP_POLL_ADD)
5798 return &req->poll;
5799 return &req->apoll->poll;
5800}
5801
5641897a 5802static void io_poll_req_insert(struct io_kiocb *req)
d4e7cd36 5803{
5641897a
PB
5804 struct io_ring_ctx *ctx = req->ctx;
5805 struct hlist_head *list;
18bceab1 5806
cef216fc 5807 list = &ctx->cancel_hash[hash_long(req->cqe.user_data, ctx->cancel_hash_bits)];
5641897a 5808 hlist_add_head(&req->hash_node, list);
18bceab1
JA
5809}
5810
5641897a
PB
5811static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5812 wait_queue_func_t wake_func)
18bceab1 5813{
5641897a 5814 poll->head = NULL;
5641897a
PB
5815#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
5816 /* mask in events that we always want/need */
5817 poll->events = events | IO_POLL_UNMASK;
5818 INIT_LIST_HEAD(&poll->wait.entry);
5819 init_waitqueue_func_entry(&poll->wait, wake_func);
18bceab1
JA
5820}
5821
aa43477b 5822static inline void io_poll_remove_entry(struct io_poll_iocb *poll)
18bceab1 5823{
791f3465 5824 struct wait_queue_head *head = smp_load_acquire(&poll->head);
18bceab1 5825
791f3465
PB
5826 if (head) {
5827 spin_lock_irq(&head->lock);
5828 list_del_init(&poll->wait.entry);
5829 poll->head = NULL;
5830 spin_unlock_irq(&head->lock);
5831 }
aa43477b 5832}
18bceab1 5833
aa43477b
PB
5834static void io_poll_remove_entries(struct io_kiocb *req)
5835{
91eac1c6
JA
5836 /*
5837 * Nothing to do if neither of those flags are set. Avoid dipping
5838 * into the poll/apoll/double cachelines if we can.
5839 */
5840 if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
5841 return;
18bceab1 5842
791f3465
PB
5843 /*
5844 * While we hold the waitqueue lock and the waitqueue is nonempty,
5845 * wake_up_pollfree() will wait for us. However, taking the waitqueue
5846 * lock in the first place can race with the waitqueue being freed.
5847 *
5848 * We solve this as eventpoll does: by taking advantage of the fact that
5849 * all users of wake_up_pollfree() will RCU-delay the actual free. If
5850 * we enter rcu_read_lock() and see that the pointer to the queue is
5851 * non-NULL, we can then lock it without the memory being freed out from
5852 * under us.
5853 *
5854 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
5855 * case the caller deletes the entry from the queue, leaving it empty.
5856 * In that case, only RCU prevents the queue memory from being freed.
5857 */
5858 rcu_read_lock();
91eac1c6
JA
5859 if (req->flags & REQ_F_SINGLE_POLL)
5860 io_poll_remove_entry(io_poll_get_single(req));
5861 if (req->flags & REQ_F_DOUBLE_POLL)
5862 io_poll_remove_entry(io_poll_get_double(req));
791f3465 5863 rcu_read_unlock();
18bceab1
JA
5864}
5865
aa43477b
PB
5866/*
5867 * All poll tw should go through this. Checks for poll events, manages
5868 * references, does rewait, etc.
5869 *
5870 * Returns a negative error on failure. >0 when no action is required, which is
5871 * either a spurious wakeup or a multishot CQE was served. 0 when it's done with
cef216fc 5872 * the request, in which case the mask is stored in req->cqe.res.
aa43477b 5873 */
5106dd6e 5874static int io_poll_check_events(struct io_kiocb *req, bool locked)
18bceab1 5875{
74ce6ce4 5876 struct io_ring_ctx *ctx = req->ctx;
aa43477b 5877 int v;
18bceab1 5878
316319e8 5879 /* req->task == current here, checking PF_EXITING is safe */
e09ee510 5880 if (unlikely(req->task->flags & PF_EXITING))
aa43477b 5881 io_poll_mark_cancelled(req);
18bceab1 5882
aa43477b
PB
5883 do {
5884 v = atomic_read(&req->poll_refs);
74ce6ce4 5885
aa43477b
PB
5886 /* tw handler should be the owner, and so have some references */
5887 if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
5888 return 0;
5889 if (v & IO_POLL_CANCEL_FLAG)
5890 return -ECANCELED;
8706e04e 5891
cef216fc 5892 if (!req->cqe.res) {
2804ecd8 5893 struct poll_table_struct pt = { ._key = req->apoll_events };
cce64ef0 5894 unsigned flags = locked ? 0 : IO_URING_F_UNLOCKED;
18bceab1 5895
cce64ef0 5896 if (unlikely(!io_assign_file(req, flags)))
7179c3ce 5897 return -EBADF;
cef216fc 5898 req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
c8b5e260 5899 }
74ce6ce4 5900
aa43477b 5901		/* multishot, just fill a CQE and proceed */
cef216fc
PB
5902 if (req->cqe.res && !(req->apoll_events & EPOLLONESHOT)) {
5903 __poll_t mask = mangle_poll(req->cqe.res & req->apoll_events);
aa43477b 5904 bool filled;
18bceab1 5905
aa43477b 5906 spin_lock(&ctx->completion_lock);
cef216fc 5907 filled = io_fill_cqe_aux(ctx, req->cqe.user_data, mask,
aa43477b
PB
5908 IORING_CQE_F_MORE);
5909 io_commit_cqring(ctx);
5910 spin_unlock(&ctx->completion_lock);
5911 if (unlikely(!filled))
5912 return -ECANCELED;
5913 io_cqring_ev_posted(ctx);
cef216fc 5914 } else if (req->cqe.res) {
aa43477b
PB
5915 return 0;
5916 }
18bceab1 5917
aa43477b
PB
5918 /*
5919 * Release all references, retry if someone tried to restart
5920 * task_work while we were executing it.
5921 */
5922 } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));
18bceab1 5923
18bceab1
JA
5924 return 1;
5925}
5926
aa43477b 5927static void io_poll_task_func(struct io_kiocb *req, bool *locked)
18bceab1 5928{
18bceab1 5929 struct io_ring_ctx *ctx = req->ctx;
aa43477b 5930 int ret;
18bceab1 5931
5106dd6e 5932 ret = io_poll_check_events(req, *locked);
aa43477b
PB
5933 if (ret > 0)
5934 return;
5935
5936 if (!ret) {
cef216fc 5937 req->cqe.res = mangle_poll(req->cqe.res & req->poll.events);
e27414be 5938 } else {
cef216fc 5939 req->cqe.res = ret;
aa43477b 5940 req_set_fail(req);
a62682f9 5941 }
aa43477b
PB
5942
5943 io_poll_remove_entries(req);
5944 spin_lock(&ctx->completion_lock);
5945 hash_del(&req->hash_node);
cef216fc 5946 __io_req_complete_post(req, req->cqe.res, 0);
aa43477b
PB
5947 io_commit_cqring(ctx);
5948 spin_unlock(&ctx->completion_lock);
5949 io_cqring_ev_posted(ctx);
18bceab1
JA
5950}
5951
aa43477b 5952static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
18bceab1
JA
5953{
5954 struct io_ring_ctx *ctx = req->ctx;
aa43477b 5955 int ret;
18bceab1 5956
5106dd6e 5957 ret = io_poll_check_events(req, *locked);
aa43477b
PB
5958 if (ret > 0)
5959 return;
18bceab1 5960
aa43477b
PB
5961 io_poll_remove_entries(req);
5962 spin_lock(&ctx->completion_lock);
5963 hash_del(&req->hash_node);
5964 spin_unlock(&ctx->completion_lock);
18bceab1 5965
aa43477b
PB
5966 if (!ret)
5967 io_req_task_submit(req, locked);
5968 else
5969 io_req_complete_failed(req, ret);
18bceab1
JA
5970}
5971
81459350 5972static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
aa43477b 5973{
cef216fc 5974 req->cqe.res = mask;
81459350
JA
5975 /*
5976	 * This is useful for a poll that is armed on behalf of another
5977 * request, and where the wakeup path could be on a different
5978 * CPU. We want to avoid pulling in req->apoll->events for that
5979 * case.
5980 */
2804ecd8 5981 req->apoll_events = events;
aa43477b
PB
5982 if (req->opcode == IORING_OP_POLL_ADD)
5983 req->io_task_work.func = io_poll_task_func;
5984 else
5985 req->io_task_work.func = io_apoll_task_func;
5986
cef216fc 5987 trace_io_uring_task_add(req->ctx, req, req->cqe.user_data, req->opcode, mask);
aa43477b
PB
5988 io_req_task_work_add(req, false);
5989}
5990
81459350 5991static inline void io_poll_execute(struct io_kiocb *req, int res, int events)
aa43477b
PB
5992{
5993 if (io_poll_get_ownership(req))
81459350 5994 __io_poll_execute(req, res, events);
aa43477b
PB
5995}
5996
5997static void io_poll_cancel_req(struct io_kiocb *req)
5998{
5999 io_poll_mark_cancelled(req);
6000 /* kick tw, which should complete the request */
81459350 6001 io_poll_execute(req, 0, 0);
aa43477b
PB
6002}
6003
d89a4fac
JA
6004#define wqe_to_req(wait) ((void *)((unsigned long) (wait)->private & ~1))
6005#define wqe_is_double(wait) ((unsigned long) (wait)->private & 1)
6006
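/*
 * Editorial sketch, not part of this file: the two macros above stash a
 * "this is the double wait queue entry" flag in bit 0 of wait->private,
 * which is safe because struct io_kiocb pointers are more than 2-byte
 * aligned. The same tagged-pointer trick in plain C; obj, tag_ptr, untag_ptr
 * and ptr_tag are made-up names.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

struct obj {
	long payload;	/* alignment guarantees bit 0 of the address is free */
};

static inline void *tag_ptr(struct obj *o, bool flag)
{
	return (void *)((uintptr_t)o | (flag ? 1 : 0));
}

static inline struct obj *untag_ptr(void *p)
{
	return (struct obj *)((uintptr_t)p & ~(uintptr_t)1);
}

static inline bool ptr_tag(void *p)
{
	return (uintptr_t)p & 1;
}
#endif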
aa43477b
PB
6007static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
6008 void *key)
18bceab1 6009{
d89a4fac 6010 struct io_kiocb *req = wqe_to_req(wait);
aa43477b
PB
6011 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
6012 wait);
18bceab1
JA
6013 __poll_t mask = key_to_poll(key);
6014
791f3465
PB
6015 if (unlikely(mask & POLLFREE)) {
6016 io_poll_mark_cancelled(req);
6017 /* we have to kick tw in case it's not already */
81459350 6018 io_poll_execute(req, 0, poll->events);
791f3465
PB
6019
6020 /*
6021		 * If the waitqueue is being freed early but someone already
6022		 * holds ownership over it, we have to tear down the request as
6023 * best we can. That means immediately removing the request from
6024 * its waitqueue and preventing all further accesses to the
6025 * waitqueue via the request.
6026 */
6027 list_del_init(&poll->wait.entry);
6028
6029 /*
6030 * Careful: this *must* be the last step, since as soon
6031		 * as poll->head is NULL'ed out, the request can be
6032		 * completed and freed, since the poll completion path
6033		 * will no longer need to take the waitqueue lock.
6034 */
6035 smp_store_release(&poll->head, NULL);
6036 return 1;
6037 }
6038
aa43477b 6039	/* for instances that support it, check for an event match first */
18bceab1
JA
6040 if (mask && !(mask & poll->events))
6041 return 0;
6042
eb0089d6
PB
6043 if (io_poll_get_ownership(req)) {
6044 /* optional, saves extra locking for removal in tw handler */
6045 if (mask && poll->events & EPOLLONESHOT) {
6046 list_del_init(&poll->wait.entry);
6047 poll->head = NULL;
d89a4fac
JA
6048 if (wqe_is_double(wait))
6049 req->flags &= ~REQ_F_DOUBLE_POLL;
6050 else
6051 req->flags &= ~REQ_F_SINGLE_POLL;
eb0089d6 6052 }
81459350 6053 __io_poll_execute(req, mask, poll->events);
eb0089d6 6054 }
18bceab1 6055 return 1;
18bceab1
JA
6056}
6057
6058static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
807abcb0
JA
6059 struct wait_queue_head *head,
6060 struct io_poll_iocb **poll_ptr)
18bceab1
JA
6061{
6062 struct io_kiocb *req = pt->req;
d89a4fac 6063 unsigned long wqe_private = (unsigned long) req;
18bceab1
JA
6064
6065 /*
68b11e8b
PB
6066 * The file being polled uses multiple waitqueues for poll handling
6067	 * (e.g. one for read, one for write). Set up a separate io_poll_iocb
6068 * if this happens.
18bceab1 6069 */
68b11e8b 6070 if (unlikely(pt->nr_entries)) {
aa43477b 6071 struct io_poll_iocb *first = poll;
58852d4d 6072
23a65db8 6073 /* double add on the same waitqueue head, ignore */
aa43477b 6074 if (first->head == head)
23a65db8 6075 return;
18bceab1 6076 /* already have a 2nd entry, fail a third attempt */
807abcb0 6077 if (*poll_ptr) {
23a65db8
PB
6078 if ((*poll_ptr)->head == head)
6079 return;
18bceab1
JA
6080 pt->error = -EINVAL;
6081 return;
6082 }
aa43477b 6083
18bceab1
JA
6084 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
6085 if (!poll) {
6086 pt->error = -ENOMEM;
6087 return;
6088 }
d89a4fac
JA
6089 /* mark as double wq entry */
6090 wqe_private |= 1;
91eac1c6 6091 req->flags |= REQ_F_DOUBLE_POLL;
aa43477b 6092 io_init_poll_iocb(poll, first->events, first->wait.func);
807abcb0 6093 *poll_ptr = poll;
d886e185
PB
6094 if (req->opcode == IORING_OP_POLL_ADD)
6095 req->flags |= REQ_F_ASYNC_DATA;
18bceab1
JA
6096 }
6097
91eac1c6 6098 req->flags |= REQ_F_SINGLE_POLL;
68b11e8b 6099 pt->nr_entries++;
18bceab1 6100 poll->head = head;
d89a4fac 6101 poll->wait.private = (void *) wqe_private;
a31eb4a2
JX
6102
6103 if (poll->events & EPOLLEXCLUSIVE)
6104 add_wait_queue_exclusive(head, &poll->wait);
6105 else
6106 add_wait_queue(head, &poll->wait);
18bceab1
JA
6107}
6108
aa43477b 6109static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
18bceab1
JA
6110 struct poll_table_struct *p)
6111{
6112 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
d7718a9d 6113
aa43477b
PB
6114 __io_queue_proc(&pt->req->poll, pt, head,
6115 (struct io_poll_iocb **) &pt->req->async_data);
d7718a9d
JA
6116}
6117
aa43477b
PB
6118static int __io_arm_poll_handler(struct io_kiocb *req,
6119 struct io_poll_iocb *poll,
6120 struct io_poll_table *ipt, __poll_t mask)
d7718a9d
JA
6121{
6122 struct io_ring_ctx *ctx = req->ctx;
aa43477b 6123 int v;
d7718a9d 6124
4d52f338 6125 INIT_HLIST_NODE(&req->hash_node);
aa43477b 6126 io_init_poll_iocb(poll, mask, io_poll_wake);
b90cd197 6127 poll->file = req->file;
d7718a9d
JA
6128
6129 ipt->pt._key = mask;
6130 ipt->req = req;
68b11e8b
PB
6131 ipt->error = 0;
6132 ipt->nr_entries = 0;
d7718a9d 6133
aa43477b
PB
6134 /*
6135	 * Take ownership to delay any tw execution up until we're done
6136	 * with poll arming; see io_poll_get_ownership().
6137 */
6138 atomic_set(&req->poll_refs, 1);
d7718a9d 6139 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
aa43477b
PB
6140
6141 if (mask && (poll->events & EPOLLONESHOT)) {
6142 io_poll_remove_entries(req);
6143 /* no one else has access to the req, forget about the ref */
6144 return mask;
6145 }
6146 if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
6147 io_poll_remove_entries(req);
6148 if (!ipt->error)
6149 ipt->error = -EINVAL;
6150 return 0;
6151 }
d7718a9d 6152
79ebeaee 6153 spin_lock(&ctx->completion_lock);
aa43477b
PB
6154 io_poll_req_insert(req);
6155 spin_unlock(&ctx->completion_lock);
6156
6157 if (mask) {
6158 /* can't multishot if failed, just queue the event we've got */
6159 if (unlikely(ipt->error || !ipt->nr_entries))
6160 poll->events |= EPOLLONESHOT;
81459350 6161 __io_poll_execute(req, mask, poll->events);
aa43477b 6162 return 0;
d7718a9d
JA
6163 }
6164
aa43477b
PB
6165 /*
6166 * Release ownership. If someone tried to queue a tw while it was
6167 * locked, kick it off for them.
6168 */
6169 v = atomic_dec_return(&req->poll_refs);
6170 if (unlikely(v & IO_POLL_REF_MASK))
81459350 6171 __io_poll_execute(req, 0, poll->events);
aa43477b
PB
6172 return 0;
6173}
6174
6175static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
6176 struct poll_table_struct *p)
6177{
6178 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
6179 struct async_poll *apoll = pt->req->apoll;
6180
6181 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
d7718a9d
JA
6182}
6183
59b735ae
OL
6184enum {
6185 IO_APOLL_OK,
6186 IO_APOLL_ABORTED,
6187 IO_APOLL_READY
6188};
6189
4d9237e3 6190static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
d7718a9d
JA
6191{
6192 const struct io_op_def *def = &io_op_defs[req->opcode];
6193 struct io_ring_ctx *ctx = req->ctx;
6194 struct async_poll *apoll;
6195 struct io_poll_table ipt;
aa43477b
PB
6196 __poll_t mask = EPOLLONESHOT | POLLERR | POLLPRI;
6197 int ret;
d7718a9d 6198
b2d9c3da
PB
6199 if (!def->pollin && !def->pollout)
6200 return IO_APOLL_ABORTED;
658d0a40
PB
6201 if (!file_can_poll(req->file) || (req->flags & REQ_F_POLLED))
6202 return IO_APOLL_ABORTED;
b2d9c3da
PB
6203
6204 if (def->pollin) {
b2d9c3da
PB
6205 mask |= POLLIN | POLLRDNORM;
6206
6207 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
6208 if ((req->opcode == IORING_OP_RECVMSG) &&
6209 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
6210 mask &= ~POLLIN;
6211 } else {
b2d9c3da
PB
6212 mask |= POLLOUT | POLLWRNORM;
6213 }
52dd8640
DY
6214 if (def->poll_exclusive)
6215 mask |= EPOLLEXCLUSIVE;
4d9237e3
JA
6216 if (!(issue_flags & IO_URING_F_UNLOCKED) &&
6217 !list_empty(&ctx->apoll_cache)) {
6218 apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
6219 poll.wait.entry);
6220 list_del_init(&apoll->poll.wait.entry);
6221 } else {
6222 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
6223 if (unlikely(!apoll))
6224 return IO_APOLL_ABORTED;
6225 }
807abcb0 6226 apoll->double_poll = NULL;
d7718a9d 6227 req->apoll = apoll;
b2d9c3da 6228 req->flags |= REQ_F_POLLED;
d7718a9d
JA
6229 ipt.pt._qproc = io_async_queue_proc;
6230
4d55f238 6231 io_kbuf_recycle(req, issue_flags);
abdad709 6232
aa43477b 6233 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
41a5169c
HX
6234 if (ret || ipt.error)
6235 return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
6236
cef216fc 6237 trace_io_uring_poll_arm(ctx, req, req->cqe.user_data, req->opcode,
236daeae 6238 mask, apoll->poll.events);
59b735ae 6239 return IO_APOLL_OK;
d7718a9d
JA
6240}
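/*
 * Editorial sketch, not part of this file: io_arm_poll_handler() above either
 * pops a previously used struct async_poll off ctx->apoll_cache or falls back
 * to kmalloc(); entries are presumably pushed back onto that list once their
 * request completes so the allocation can be reused. A minimal user-space
 * pop-or-allocate/push-to-return cache with made-up names (node, cache,
 * cache_get, cache_put); locking is intentionally left out.
 */
#if 0
#include <stdlib.h>

struct node {
	struct node *next;
	/* ... payload ... */
};

struct cache {
	struct node *head;
};

static struct node *cache_get(struct cache *c)
{
	struct node *n = c->head;

	if (n) {
		c->head = n->next;	/* fast path: reuse a cached entry */
		return n;
	}
	return malloc(sizeof(*n));	/* slow path: allocate a fresh one */
}

static void cache_put(struct cache *c, struct node *n)
{
	n->next = c->head;		/* return the entry for later reuse */
	c->head = n;
}
#endif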
6241
76e1b642
JA
6242/*
6243 * Returns true if we found and killed one or more poll requests
6244 */
c072481d
PB
6245static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
6246 struct task_struct *tsk, bool cancel_all)
221c5eb2 6247{
78076bb6 6248 struct hlist_node *tmp;
221c5eb2 6249 struct io_kiocb *req;
aa43477b
PB
6250 bool found = false;
6251 int i;
221c5eb2 6252
79ebeaee 6253 spin_lock(&ctx->completion_lock);
78076bb6
JA
6254 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
6255 struct hlist_head *list;
6256
6257 list = &ctx->cancel_hash[i];
f3606e3a 6258 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
42a7b4ed 6259 if (io_match_task_safe(req, tsk, cancel_all)) {
61bc84c4 6260 hlist_del_init(&req->hash_node);
aa43477b
PB
6261 io_poll_cancel_req(req);
6262 found = true;
6263 }
f3606e3a 6264 }
221c5eb2 6265 }
79ebeaee 6266 spin_unlock(&ctx->completion_lock);
aa43477b 6267 return found;
221c5eb2
JA
6268}
6269
9ba5fac8
PB
6270static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
6271 bool poll_only)
e07785b0 6272 __must_hold(&ctx->completion_lock)
47f46768 6273{
78076bb6 6274 struct hlist_head *list;
47f46768
JA
6275 struct io_kiocb *req;
6276
78076bb6
JA
6277 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
6278 hlist_for_each_entry(req, list, hash_node) {
cef216fc 6279 if (sqe_addr != req->cqe.user_data)
b41e9852 6280 continue;
9ba5fac8
PB
6281 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
6282 continue;
b2cb805f 6283 return req;
47f46768 6284 }
b2cb805f
JA
6285 return NULL;
6286}
6287
aa43477b
PB
6288static bool io_poll_disarm(struct io_kiocb *req)
6289 __must_hold(&ctx->completion_lock)
6290{
6291 if (!io_poll_get_ownership(req))
6292 return false;
6293 io_poll_remove_entries(req);
6294 hash_del(&req->hash_node);
6295 return true;
6296}
6297
9ba5fac8
PB
6298static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
6299 bool poll_only)
e07785b0 6300 __must_hold(&ctx->completion_lock)
b2cb805f 6301{
aa43477b 6302 struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only);
b2cb805f 6303
b2cb805f
JA
6304 if (!req)
6305 return -ENOENT;
aa43477b
PB
6306 io_poll_cancel_req(req);
6307 return 0;
47f46768
JA
6308}
6309
9096af3e
PB
6310static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
6311 unsigned int flags)
6312{
6313 u32 events;
47f46768 6314
9096af3e
PB
6315 events = READ_ONCE(sqe->poll32_events);
6316#ifdef __BIG_ENDIAN
6317 events = swahw32(events);
6318#endif
6319 if (!(flags & IORING_POLL_ADD_MULTI))
6320 events |= EPOLLONESHOT;
6321 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
47f46768
JA
6322}
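/*
 * Editorial note, not part of this file: on big-endian kernels the
 * user-supplied poll32_events value is halfword-swapped with swahw32()
 * before use. swahw32() exchanges the two 16-bit halves of a 32-bit word,
 * so 0x12345678 becomes 0x56781234; a standalone equivalent for reference
 * (swahw32_example is a made-up name):
 */
#if 0
#include <stdint.h>

static inline uint32_t swahw32_example(uint32_t x)
{
	return (x << 16) | (x >> 16);	/* swap the two 16-bit halfwords */
}
#endif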
6323
c5de0036 6324static int io_poll_update_prep(struct io_kiocb *req,
3529d8c2 6325 const struct io_uring_sqe *sqe)
0969e783 6326{
c5de0036
PB
6327 struct io_poll_update *upd = &req->poll_update;
6328 u32 flags;
6329
0969e783
JA
6330 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6331 return -EINVAL;
26578cda 6332 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
c5de0036
PB
6333 return -EINVAL;
6334 flags = READ_ONCE(sqe->len);
6335 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
6336 IORING_POLL_ADD_MULTI))
6337 return -EINVAL;
6338 /* meaningless without update */
6339 if (flags == IORING_POLL_ADD_MULTI)
0969e783
JA
6340 return -EINVAL;
6341
c5de0036
PB
6342 upd->old_user_data = READ_ONCE(sqe->addr);
6343 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
6344 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
221c5eb2 6345
c5de0036
PB
6346 upd->new_user_data = READ_ONCE(sqe->off);
6347 if (!upd->update_user_data && upd->new_user_data)
6348 return -EINVAL;
6349 if (upd->update_events)
6350 upd->events = io_poll_parse_events(sqe, flags);
6351 else if (sqe->poll32_events)
6352 return -EINVAL;
221c5eb2 6353
221c5eb2
JA
6354 return 0;
6355}
6356
3529d8c2 6357static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
6358{
6359 struct io_poll_iocb *poll = &req->poll;
c5de0036 6360 u32 flags;
221c5eb2
JA
6361
6362 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6363 return -EINVAL;
c5de0036 6364 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
88e41cf9
JA
6365 return -EINVAL;
6366 flags = READ_ONCE(sqe->len);
c5de0036 6367 if (flags & ~IORING_POLL_ADD_MULTI)
221c5eb2 6368 return -EINVAL;
04c76b41
PB
6369 if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
6370 return -EINVAL;
221c5eb2 6371
48dcd38d 6372 io_req_set_refcount(req);
2804ecd8 6373 req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
0969e783
JA
6374 return 0;
6375}
6376
61e98203 6377static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
0969e783
JA
6378{
6379 struct io_poll_iocb *poll = &req->poll;
0969e783 6380 struct io_poll_table ipt;
aa43477b 6381 int ret;
0969e783 6382
d7718a9d 6383 ipt.pt._qproc = io_poll_queue_proc;
36703247 6384
aa43477b
PB
6385 ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
6386 ret = ret ?: ipt.error;
6387 if (ret)
6388 __io_req_complete(req, issue_flags, ret, 0);
6389 return 0;
221c5eb2
JA
6390}
6391
c5de0036 6392static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
b69de288
JA
6393{
6394 struct io_ring_ctx *ctx = req->ctx;
6395 struct io_kiocb *preq;
2bbb146d 6396 int ret2, ret = 0;
cc8e9ba7 6397 bool locked;
b69de288 6398
79ebeaee 6399 spin_lock(&ctx->completion_lock);
9ba5fac8 6400 preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
aa43477b 6401 if (!preq || !io_poll_disarm(preq)) {
79ebeaee 6402 spin_unlock(&ctx->completion_lock);
aa43477b 6403 ret = preq ? -EALREADY : -ENOENT;
2bbb146d 6404 goto out;
b69de288 6405 }
79ebeaee 6406 spin_unlock(&ctx->completion_lock);
cb3b200e 6407
2bbb146d
PB
6408 if (req->poll_update.update_events || req->poll_update.update_user_data) {
6409		/* only replace the event mask bits, keep the behavior flags */
6410 if (req->poll_update.update_events) {
6411 preq->poll.events &= ~0xffff;
6412 preq->poll.events |= req->poll_update.events & 0xffff;
6413 preq->poll.events |= IO_POLL_UNMASK;
cb3b200e 6414 }
2bbb146d 6415 if (req->poll_update.update_user_data)
cef216fc 6416 preq->cqe.user_data = req->poll_update.new_user_data;
b69de288 6417
2bbb146d
PB
6418 ret2 = io_poll_add(preq, issue_flags);
6419 /* successfully updated, don't complete poll request */
6420 if (!ret2)
6421 goto out;
b69de288 6422 }
6224590d 6423
2bbb146d 6424 req_set_fail(preq);
cef216fc 6425 preq->cqe.res = -ECANCELED;
cc8e9ba7
PB
6426 locked = !(issue_flags & IO_URING_F_UNLOCKED);
6427 io_req_task_complete(preq, &locked);
2bbb146d
PB
6428out:
6429 if (ret < 0)
6224590d 6430 req_set_fail(req);
2bbb146d 6431 /* complete update request, we're done with it */
cc8e9ba7 6432 __io_req_complete(req, issue_flags, ret, 0);
b69de288 6433 return 0;
89850fce
JA
6434}
6435
5262f567
JA
6436static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
6437{
ad8a48ac
JA
6438 struct io_timeout_data *data = container_of(timer,
6439 struct io_timeout_data, timer);
6440 struct io_kiocb *req = data->req;
6441 struct io_ring_ctx *ctx = req->ctx;
5262f567
JA
6442 unsigned long flags;
6443
89850fce 6444 spin_lock_irqsave(&ctx->timeout_lock, flags);
a71976f3 6445 list_del_init(&req->timeout.list);
01cec8c1
PB
6446 atomic_set(&req->ctx->cq_timeouts,
6447 atomic_read(&req->ctx->cq_timeouts) + 1);
89850fce 6448 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
01cec8c1 6449
a90c8bf6
PB
6450 if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
6451 req_set_fail(req);
6452
cef216fc 6453 req->cqe.res = -ETIME;
a90c8bf6 6454 req->io_task_work.func = io_req_task_complete;
4813c377 6455 io_req_task_work_add(req, false);
5262f567
JA
6456 return HRTIMER_NORESTART;
6457}
6458
fbd15848
PB
6459static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
6460 __u64 user_data)
89850fce 6461 __must_hold(&ctx->timeout_lock)
f254ac04 6462{
fbd15848 6463 struct io_timeout_data *io;
47f46768 6464 struct io_kiocb *req;
fd9c7bc5 6465 bool found = false;
f254ac04 6466
135fcde8 6467 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
cef216fc 6468 found = user_data == req->cqe.user_data;
fd9c7bc5 6469 if (found)
47f46768 6470 break;
47f46768 6471 }
fd9c7bc5
PB
6472 if (!found)
6473 return ERR_PTR(-ENOENT);
fbd15848
PB
6474
6475 io = req->async_data;
fd9c7bc5 6476 if (hrtimer_try_to_cancel(&io->timer) == -1)
fbd15848 6477 return ERR_PTR(-EALREADY);
a71976f3 6478 list_del_init(&req->timeout.list);
fbd15848
PB
6479 return req;
6480}
47f46768 6481
fbd15848 6482static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
ec3c3d0f 6483 __must_hold(&ctx->completion_lock)
89850fce 6484 __must_hold(&ctx->timeout_lock)
fbd15848
PB
6485{
6486 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
6487
6488 if (IS_ERR(req))
6489 return PTR_ERR(req);
6695490d 6490 io_req_task_queue_fail(req, -ECANCELED);
f254ac04
JA
6491 return 0;
6492}
6493
50c1df2b
JA
6494static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
6495{
6496 switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
6497 case IORING_TIMEOUT_BOOTTIME:
6498 return CLOCK_BOOTTIME;
6499 case IORING_TIMEOUT_REALTIME:
6500 return CLOCK_REALTIME;
6501 default:
6502 /* can't happen, vetted at prep time */
6503 WARN_ON_ONCE(1);
6504 fallthrough;
6505 case 0:
6506 return CLOCK_MONOTONIC;
6507 }
6508}
6509
f1042b6c
PB
6510static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
6511 struct timespec64 *ts, enum hrtimer_mode mode)
6512 __must_hold(&ctx->timeout_lock)
6513{
6514 struct io_timeout_data *io;
6515 struct io_kiocb *req;
6516 bool found = false;
6517
6518 list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) {
cef216fc 6519 found = user_data == req->cqe.user_data;
f1042b6c
PB
6520 if (found)
6521 break;
6522 }
6523 if (!found)
6524 return -ENOENT;
6525
6526 io = req->async_data;
6527 if (hrtimer_try_to_cancel(&io->timer) == -1)
6528 return -EALREADY;
6529 hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
6530 io->timer.function = io_link_timeout_fn;
6531 hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
6532 return 0;
6533}
6534
9c8e11b3
PB
6535static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
6536 struct timespec64 *ts, enum hrtimer_mode mode)
89850fce 6537 __must_hold(&ctx->timeout_lock)
47f46768 6538{
9c8e11b3
PB
6539 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
6540 struct io_timeout_data *data;
47f46768 6541
9c8e11b3
PB
6542 if (IS_ERR(req))
6543 return PTR_ERR(req);
47f46768 6544
9c8e11b3
PB
6545 req->timeout.off = 0; /* noseq */
6546 data = req->async_data;
6547 list_add_tail(&req->timeout.list, &ctx->timeout_list);
50c1df2b 6548 hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
9c8e11b3
PB
6549 data->timer.function = io_timeout_fn;
6550 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
6551 return 0;
47f46768
JA
6552}
6553
3529d8c2
JA
6554static int io_timeout_remove_prep(struct io_kiocb *req,
6555 const struct io_uring_sqe *sqe)
b29472ee 6556{
9c8e11b3
PB
6557 struct io_timeout_rem *tr = &req->timeout_rem;
6558
b29472ee
JA
6559 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6560 return -EINVAL;
61710e43
DA
6561 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6562 return -EINVAL;
26578cda 6563 if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
b29472ee
JA
6564 return -EINVAL;
6565
f1042b6c 6566 tr->ltimeout = false;
9c8e11b3
PB
6567 tr->addr = READ_ONCE(sqe->addr);
6568 tr->flags = READ_ONCE(sqe->timeout_flags);
f1042b6c
PB
6569 if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
6570 if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
6571 return -EINVAL;
6572 if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
6573 tr->ltimeout = true;
6574 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
9c8e11b3
PB
6575 return -EINVAL;
6576 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
6577 return -EFAULT;
2087009c
YB
6578 if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
6579 return -EINVAL;
9c8e11b3
PB
6580 } else if (tr->flags) {
6581 /* timeout removal doesn't support flags */
b29472ee 6582 return -EINVAL;
9c8e11b3 6583 }
b29472ee 6584
b29472ee
JA
6585 return 0;
6586}
6587
8662daec
PB
6588static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
6589{
6590 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
6591 : HRTIMER_MODE_REL;
6592}
6593
11365043
JA
6594/*
6595 * Remove or update an existing timeout command
6596 */
61e98203 6597static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
11365043 6598{
9c8e11b3 6599 struct io_timeout_rem *tr = &req->timeout_rem;
11365043 6600 struct io_ring_ctx *ctx = req->ctx;
47f46768 6601 int ret;
11365043 6602
ec3c3d0f
PB
6603 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
6604 spin_lock(&ctx->completion_lock);
6605 spin_lock_irq(&ctx->timeout_lock);
9c8e11b3 6606 ret = io_timeout_cancel(ctx, tr->addr);
ec3c3d0f
PB
6607 spin_unlock_irq(&ctx->timeout_lock);
6608 spin_unlock(&ctx->completion_lock);
6609 } else {
f1042b6c
PB
6610 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
6611
ec3c3d0f 6612 spin_lock_irq(&ctx->timeout_lock);
f1042b6c
PB
6613 if (tr->ltimeout)
6614 ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
6615 else
6616 ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
ec3c3d0f
PB
6617 spin_unlock_irq(&ctx->timeout_lock);
6618 }
11365043 6619
4e88d6e7 6620 if (ret < 0)
93d2bcd2 6621 req_set_fail(req);
505657bc 6622 io_req_complete_post(req, ret, 0);
11365043 6623 return 0;
5262f567
JA
6624}
6625
3529d8c2 6626static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2d28390a 6627 bool is_timeout_link)
5262f567 6628{
ad8a48ac 6629 struct io_timeout_data *data;
a41525ab 6630 unsigned flags;
56080b02 6631 u32 off = READ_ONCE(sqe->off);
5262f567 6632
ad8a48ac 6633 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5262f567 6634 return -EINVAL;
26578cda
PB
6635 if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
6636 sqe->splice_fd_in)
a41525ab 6637 return -EINVAL;
56080b02 6638 if (off && is_timeout_link)
2d28390a 6639 return -EINVAL;
a41525ab 6640 flags = READ_ONCE(sqe->timeout_flags);
6224590d
PB
6641 if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
6642 IORING_TIMEOUT_ETIME_SUCCESS))
50c1df2b
JA
6643 return -EINVAL;
6644 /* more than one clock specified is invalid, obviously */
6645 if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
5262f567 6646 return -EINVAL;
bdf20073 6647
ef9dd637 6648 INIT_LIST_HEAD(&req->timeout.list);
bfe68a22 6649 req->timeout.off = off;
f18ee4cf
PB
6650 if (unlikely(off && !req->ctx->off_timeout_used))
6651 req->ctx->off_timeout_used = true;
26a61679 6652
d6a644a7
PB
6653 if (WARN_ON_ONCE(req_has_async_data(req)))
6654 return -EFAULT;
6655 if (io_alloc_async_data(req))
26a61679
JA
6656 return -ENOMEM;
6657
e8c2bc1f 6658 data = req->async_data;
ad8a48ac 6659 data->req = req;
50c1df2b 6660 data->flags = flags;
ad8a48ac
JA
6661
6662 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
6663 return -EFAULT;
6664
f6223ff7
YB
6665 if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
6666 return -EINVAL;
6667
e677edbc 6668 INIT_LIST_HEAD(&req->timeout.list);
8662daec 6669 data->mode = io_translate_timeout_mode(flags);
50c1df2b 6670 hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
b97e736a
PB
6671
6672 if (is_timeout_link) {
6673 struct io_submit_link *link = &req->ctx->submit_state.link;
6674
6675 if (!link->head)
6676 return -EINVAL;
6677 if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
6678 return -EINVAL;
4d13d1a4
PB
6679 req->timeout.head = link->last;
6680 link->last->flags |= REQ_F_ARM_LTIMEOUT;
b97e736a 6681 }
ad8a48ac
JA
6682 return 0;
6683}
6684
61e98203 6685static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
ad8a48ac 6686{
ad8a48ac 6687 struct io_ring_ctx *ctx = req->ctx;
e8c2bc1f 6688 struct io_timeout_data *data = req->async_data;
ad8a48ac 6689 struct list_head *entry;
bfe68a22 6690 u32 tail, off = req->timeout.off;
ad8a48ac 6691
89850fce 6692 spin_lock_irq(&ctx->timeout_lock);
93bd25bb 6693
5262f567
JA
6694 /*
6695	 * sqe->off holds how many events need to occur for this
93bd25bb
JA
6696 * timeout event to be satisfied. If it isn't set, then this is
6697 * a pure timeout request, sequence isn't used.
5262f567 6698 */
8eb7e2d0 6699 if (io_is_timeout_noseq(req)) {
93bd25bb
JA
6700 entry = ctx->timeout_list.prev;
6701 goto add;
6702 }
5262f567 6703
bfe68a22
PB
6704 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
6705 req->timeout.target_seq = tail + off;
5262f567 6706
f010505b
MDG
6707 /* Update the last seq here in case io_flush_timeouts() hasn't.
6708 * This is safe because ->completion_lock is held, and submissions
6709 * and completions are never mixed in the same ->completion_lock section.
6710 */
6711 ctx->cq_last_tm_flush = tail;
6712
5262f567
JA
6713 /*
6714 * Insertion sort, ensuring the first entry in the list is always
6715 * the one we need first.
6716 */
5262f567 6717 list_for_each_prev(entry, &ctx->timeout_list) {
135fcde8
PB
6718 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
6719 timeout.list);
5262f567 6720
8eb7e2d0 6721 if (io_is_timeout_noseq(nxt))
93bd25bb 6722 continue;
bfe68a22
PB
6723 /* nxt.seq is behind @tail, otherwise would've been completed */
6724 if (off >= nxt->timeout.target_seq - tail)
5262f567
JA
6725 break;
6726 }
93bd25bb 6727add:
135fcde8 6728 list_add(&req->timeout.list, entry);
ad8a48ac
JA
6729 data->timer.function = io_timeout_fn;
6730 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
89850fce 6731 spin_unlock_irq(&ctx->timeout_lock);
5262f567
JA
6732 return 0;
6733}
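
/*
 * Illustrative userspace sketch (not part of this file): arming a counted
 * timeout, which is handled by io_timeout_prep()/io_timeout() above.  With
 * off/count == N the request completes with res == 0 once N other CQEs have
 * been posted, or with -ETIME if the clock expires first.  Assumes liburing
 * helpers; the function below is a hypothetical example and a real program
 * would match CQEs by user_data.
 */
#include <liburing.h>

static int timeout_after_two_cqes_or_1s(struct io_uring *ring)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int res;

	/* count == 2 lands in sqe->off on the kernel side */
	io_uring_prep_timeout(sqe, &ts, 2, 0);
	io_uring_submit(ring);

	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	res = cqe->res;			/* 0 or -ETIME */
	io_uring_cqe_seen(ring, cqe);
	return res;
}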
5262f567 6734
f458dd84
PB
6735struct io_cancel_data {
6736 struct io_ring_ctx *ctx;
6737 u64 user_data;
6738};
6739
62755e35
JA
6740static bool io_cancel_cb(struct io_wq_work *work, void *data)
6741{
6742 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
f458dd84 6743 struct io_cancel_data *cd = data;
62755e35 6744
cef216fc 6745 return req->ctx == cd->ctx && req->cqe.user_data == cd->user_data;
62755e35
JA
6746}
6747
f458dd84
PB
6748static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
6749 struct io_ring_ctx *ctx)
62755e35 6750{
f458dd84 6751 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
62755e35 6752 enum io_wq_cancel cancel_ret;
62755e35
JA
6753 int ret = 0;
6754
f458dd84 6755 if (!tctx || !tctx->io_wq)
5aa75ed5
JA
6756 return -ENOENT;
6757
f458dd84 6758 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
62755e35
JA
6759 switch (cancel_ret) {
6760 case IO_WQ_CANCEL_OK:
6761 ret = 0;
6762 break;
6763 case IO_WQ_CANCEL_RUNNING:
6764 ret = -EALREADY;
6765 break;
6766 case IO_WQ_CANCEL_NOTFOUND:
6767 ret = -ENOENT;
6768 break;
6769 }
6770
e977d6d3
JA
6771 return ret;
6772}
6773
8cb01fac 6774static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
47f46768 6775{
8cb01fac 6776 struct io_ring_ctx *ctx = req->ctx;
47f46768
JA
6777 int ret;
6778
dadebc35 6779 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
8cb01fac 6780
f458dd84 6781 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
ccbf7261
JA
6782 /*
 6783 * Fall-through even for -EALREADY, as we may have a poll handler armed
 6784 * that needs unarming.
6785 */
6786 if (!ret)
6787 return 0;
505657bc
PB
6788
6789 spin_lock(&ctx->completion_lock);
ccbf7261
JA
6790 ret = io_poll_cancel(ctx, sqe_addr, false);
6791 if (ret != -ENOENT)
6792 goto out;
6793
79ebeaee 6794 spin_lock_irq(&ctx->timeout_lock);
47f46768 6795 ret = io_timeout_cancel(ctx, sqe_addr);
79ebeaee 6796 spin_unlock_irq(&ctx->timeout_lock);
505657bc
PB
6797out:
6798 spin_unlock(&ctx->completion_lock);
6799 return ret;
47f46768
JA
6800}
6801
3529d8c2
JA
6802static int io_async_cancel_prep(struct io_kiocb *req,
6803 const struct io_uring_sqe *sqe)
e977d6d3 6804{
fbf23849 6805 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
e977d6d3 6806 return -EINVAL;
61710e43
DA
6807 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6808 return -EINVAL;
26578cda
PB
6809 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
6810 sqe->splice_fd_in)
e977d6d3
JA
6811 return -EINVAL;
6812
fbf23849
JA
6813 req->cancel.addr = READ_ONCE(sqe->addr);
6814 return 0;
6815}
6816
61e98203 6817static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
fbf23849
JA
6818{
6819 struct io_ring_ctx *ctx = req->ctx;
58f99373
PB
6820 u64 sqe_addr = req->cancel.addr;
6821 struct io_tctx_node *node;
6822 int ret;
6823
8cb01fac 6824 ret = io_try_cancel_userdata(req, sqe_addr);
58f99373
PB
6825 if (ret != -ENOENT)
6826 goto done;
58f99373
PB
6827
6828 /* slow path, try all io-wq's */
f8929630 6829 io_ring_submit_lock(ctx, issue_flags);
58f99373
PB
6830 ret = -ENOENT;
6831 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
6832 struct io_uring_task *tctx = node->task->io_uring;
fbf23849 6833
58f99373
PB
6834 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
6835 if (ret != -ENOENT)
6836 break;
6837 }
f8929630 6838 io_ring_submit_unlock(ctx, issue_flags);
58f99373 6839done:
58f99373 6840 if (ret < 0)
93d2bcd2 6841 req_set_fail(req);
505657bc 6842 io_req_complete_post(req, ret, 0);
5262f567
JA
6843 return 0;
6844}
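
/*
 * Illustrative userspace sketch (not part of this file): cancelling a
 * previously submitted request by its user_data, which is serviced by
 * io_async_cancel() above.  Assumes a liburing version that provides
 * io_uring_prep_cancel64(); the completion result mirrors the kernel
 * mapping: 0 (cancelled), -ENOENT (not found) or -EALREADY (already
 * running).  CQE matching by user_data is elided for brevity.
 */
#include <liburing.h>

static int cancel_by_user_data(struct io_uring *ring, unsigned long long tag)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int res;

	io_uring_prep_cancel64(sqe, tag, 0);
	io_uring_submit(ring);

	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	res = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return res;
}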
6845
269bbe5f 6846static int io_rsrc_update_prep(struct io_kiocb *req,
05f3fb3c
JA
6847 const struct io_uring_sqe *sqe)
6848{
61710e43
DA
6849 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6850 return -EINVAL;
26578cda 6851 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
05f3fb3c
JA
6852 return -EINVAL;
6853
269bbe5f
BM
6854 req->rsrc_update.offset = READ_ONCE(sqe->off);
6855 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
6856 if (!req->rsrc_update.nr_args)
05f3fb3c 6857 return -EINVAL;
269bbe5f 6858 req->rsrc_update.arg = READ_ONCE(sqe->addr);
05f3fb3c
JA
6859 return 0;
6860}
6861
889fca73 6862static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
fbf23849
JA
6863{
6864 struct io_ring_ctx *ctx = req->ctx;
c3bdad02 6865 struct io_uring_rsrc_update2 up;
05f3fb3c 6866 int ret;
fbf23849 6867
269bbe5f
BM
6868 up.offset = req->rsrc_update.offset;
6869 up.data = req->rsrc_update.arg;
c3bdad02
PB
6870 up.nr = 0;
6871 up.tags = 0;
615cee49 6872 up.resv = 0;
d8a3ba9c 6873 up.resv2 = 0;
05f3fb3c 6874
f8929630 6875 io_ring_submit_lock(ctx, issue_flags);
fdecb662 6876 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
98f0b3b4 6877 &up, req->rsrc_update.nr_args);
f8929630 6878 io_ring_submit_unlock(ctx, issue_flags);
05f3fb3c
JA
6879
6880 if (ret < 0)
93d2bcd2 6881 req_set_fail(req);
889fca73 6882 __io_req_complete(req, issue_flags, ret, 0);
5262f567
JA
6883 return 0;
6884}
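
/*
 * Illustrative userspace sketch (not part of this file): updating one slot
 * of the registered-file table with IORING_OP_FILES_UPDATE, the request
 * serviced by io_files_update() above.  Assumes liburing's
 * io_uring_prep_files_update() helper; passing -1 in fds clears the slot.
 */
#include <liburing.h>

static int replace_registered_file(struct io_uring *ring, int slot, int newfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int fds[1] = { newfd };
	int res;

	io_uring_prep_files_update(sqe, fds, 1, slot);
	io_uring_submit(ring);

	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	res = cqe->res;			/* number of slots updated, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return res;
}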
6885
bfe76559 6886static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1 6887{
d625c6ee 6888 switch (req->opcode) {
e781573e 6889 case IORING_OP_NOP:
bfe76559 6890 return 0;
f67676d1
JA
6891 case IORING_OP_READV:
6892 case IORING_OP_READ_FIXED:
3a6820f2 6893 case IORING_OP_READ:
f67676d1
JA
6894 case IORING_OP_WRITEV:
6895 case IORING_OP_WRITE_FIXED:
3a6820f2 6896 case IORING_OP_WRITE:
584b0180 6897 return io_prep_rw(req, sqe);
0969e783 6898 case IORING_OP_POLL_ADD:
bfe76559 6899 return io_poll_add_prep(req, sqe);
0969e783 6900 case IORING_OP_POLL_REMOVE:
c5de0036 6901 return io_poll_update_prep(req, sqe);
8ed8d3c3 6902 case IORING_OP_FSYNC:
1155c76a 6903 return io_fsync_prep(req, sqe);
8ed8d3c3 6904 case IORING_OP_SYNC_FILE_RANGE:
1155c76a 6905 return io_sfr_prep(req, sqe);
03b1230c 6906 case IORING_OP_SENDMSG:
fddaface 6907 case IORING_OP_SEND:
bfe76559 6908 return io_sendmsg_prep(req, sqe);
03b1230c 6909 case IORING_OP_RECVMSG:
fddaface 6910 case IORING_OP_RECV:
bfe76559 6911 return io_recvmsg_prep(req, sqe);
f499a021 6912 case IORING_OP_CONNECT:
bfe76559 6913 return io_connect_prep(req, sqe);
2d28390a 6914 case IORING_OP_TIMEOUT:
bfe76559 6915 return io_timeout_prep(req, sqe, false);
b29472ee 6916 case IORING_OP_TIMEOUT_REMOVE:
bfe76559 6917 return io_timeout_remove_prep(req, sqe);
fbf23849 6918 case IORING_OP_ASYNC_CANCEL:
bfe76559 6919 return io_async_cancel_prep(req, sqe);
2d28390a 6920 case IORING_OP_LINK_TIMEOUT:
bfe76559 6921 return io_timeout_prep(req, sqe, true);
8ed8d3c3 6922 case IORING_OP_ACCEPT:
bfe76559 6923 return io_accept_prep(req, sqe);
d63d1b5e 6924 case IORING_OP_FALLOCATE:
bfe76559 6925 return io_fallocate_prep(req, sqe);
15b71abe 6926 case IORING_OP_OPENAT:
bfe76559 6927 return io_openat_prep(req, sqe);
b5dba59e 6928 case IORING_OP_CLOSE:
bfe76559 6929 return io_close_prep(req, sqe);
05f3fb3c 6930 case IORING_OP_FILES_UPDATE:
269bbe5f 6931 return io_rsrc_update_prep(req, sqe);
eddc7ef5 6932 case IORING_OP_STATX:
bfe76559 6933 return io_statx_prep(req, sqe);
4840e418 6934 case IORING_OP_FADVISE:
bfe76559 6935 return io_fadvise_prep(req, sqe);
c1ca757b 6936 case IORING_OP_MADVISE:
bfe76559 6937 return io_madvise_prep(req, sqe);
cebdb986 6938 case IORING_OP_OPENAT2:
bfe76559 6939 return io_openat2_prep(req, sqe);
3e4827b0 6940 case IORING_OP_EPOLL_CTL:
bfe76559 6941 return io_epoll_ctl_prep(req, sqe);
7d67af2c 6942 case IORING_OP_SPLICE:
bfe76559 6943 return io_splice_prep(req, sqe);
ddf0322d 6944 case IORING_OP_PROVIDE_BUFFERS:
bfe76559 6945 return io_provide_buffers_prep(req, sqe);
067524e9 6946 case IORING_OP_REMOVE_BUFFERS:
bfe76559 6947 return io_remove_buffers_prep(req, sqe);
f2a8d5c7 6948 case IORING_OP_TEE:
bfe76559 6949 return io_tee_prep(req, sqe);
36f4fa68
JA
6950 case IORING_OP_SHUTDOWN:
6951 return io_shutdown_prep(req, sqe);
80a261fd
JA
6952 case IORING_OP_RENAMEAT:
6953 return io_renameat_prep(req, sqe);
14a1143b
JA
6954 case IORING_OP_UNLINKAT:
6955 return io_unlinkat_prep(req, sqe);
e34a02dc
DK
6956 case IORING_OP_MKDIRAT:
6957 return io_mkdirat_prep(req, sqe);
7a8721f8
DK
6958 case IORING_OP_SYMLINKAT:
6959 return io_symlinkat_prep(req, sqe);
cf30da90
DK
6960 case IORING_OP_LINKAT:
6961 return io_linkat_prep(req, sqe);
4f57f06c
JA
6962 case IORING_OP_MSG_RING:
6963 return io_msg_ring_prep(req, sqe);
f67676d1
JA
6964 }
6965
bfe76559
PB
6966 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
6967 req->opcode);
bd54b6fe 6968 return -EINVAL;
bfe76559
PB
6969}
6970
93642ef8 6971static int io_req_prep_async(struct io_kiocb *req)
bfe76559 6972{
b7e298d2
PB
6973 if (!io_op_defs[req->opcode].needs_async_setup)
6974 return 0;
d886e185 6975 if (WARN_ON_ONCE(req_has_async_data(req)))
b7e298d2
PB
6976 return -EFAULT;
6977 if (io_alloc_async_data(req))
6978 return -EAGAIN;
6979
93642ef8
PB
6980 switch (req->opcode) {
6981 case IORING_OP_READV:
93642ef8
PB
6982 return io_rw_prep_async(req, READ);
6983 case IORING_OP_WRITEV:
93642ef8
PB
6984 return io_rw_prep_async(req, WRITE);
6985 case IORING_OP_SENDMSG:
93642ef8
PB
6986 return io_sendmsg_prep_async(req);
6987 case IORING_OP_RECVMSG:
93642ef8
PB
6988 return io_recvmsg_prep_async(req);
6989 case IORING_OP_CONNECT:
6990 return io_connect_prep_async(req);
6991 }
b7e298d2
PB
6992 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
6993 req->opcode);
6994 return -EFAULT;
f67676d1
JA
6995}
6996
9cf7c104
PB
6997static u32 io_get_sequence(struct io_kiocb *req)
6998{
a3dbdf54 6999 u32 seq = req->ctx->cached_sq_head;
963c6abb 7000 struct io_kiocb *cur;
9cf7c104 7001
a3dbdf54 7002 /* need original cached_sq_head, but it was increased for each req */
963c6abb 7003 io_for_each_link(cur, req)
a3dbdf54
PB
7004 seq--;
7005 return seq;
9cf7c104
PB
7006}
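
/*
 * Minimal model (illustrative only) of the calculation in io_get_sequence()
 * above: ctx->cached_sq_head was bumped once per SQE consumed, so a request
 * heading a chain of N linked SQEs has sequence cached_sq_head - N.  The toy
 * types below are hypothetical stand-ins, not kernel structures.
 */
struct toy_req {
	struct toy_req *link;
};

static unsigned int toy_get_sequence(unsigned int cached_sq_head,
				     struct toy_req *req)
{
	unsigned int seq = cached_sq_head;
	struct toy_req *cur;

	for (cur = req; cur; cur = cur->link)
		seq--;			/* one SQE was consumed per linked req */
	return seq;
}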
7007
c072481d 7008static __cold void io_drain_req(struct io_kiocb *req)
de0617e4 7009{
a197f664 7010 struct io_ring_ctx *ctx = req->ctx;
27dc8338 7011 struct io_defer_entry *de;
f67676d1 7012 int ret;
e0eb71dc 7013 u32 seq = io_get_sequence(req);
3c19966d 7014
9d858b21 7015 /* Still need to defer if there is a pending req in the defer list. */
e302f104 7016 spin_lock(&ctx->completion_lock);
5e371265 7017 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
e302f104 7018 spin_unlock(&ctx->completion_lock);
e0eb71dc 7019queue:
10c66904 7020 ctx->drain_active = false;
e0eb71dc
PB
7021 io_req_task_queue(req);
7022 return;
10c66904 7023 }
e302f104 7024 spin_unlock(&ctx->completion_lock);
9cf7c104 7025
b7e298d2 7026 ret = io_req_prep_async(req);
e0eb71dc
PB
7027 if (ret) {
7028fail:
7029 io_req_complete_failed(req, ret);
7030 return;
7031 }
cbdcb435 7032 io_prep_async_link(req);
27dc8338 7033 de = kmalloc(sizeof(*de), GFP_KERNEL);
76cc33d7 7034 if (!de) {
1b48773f 7035 ret = -ENOMEM;
e0eb71dc 7036 goto fail;
76cc33d7 7037 }
2d28390a 7038
79ebeaee 7039 spin_lock(&ctx->completion_lock);
9cf7c104 7040 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
79ebeaee 7041 spin_unlock(&ctx->completion_lock);
27dc8338 7042 kfree(de);
e0eb71dc 7043 goto queue;
de0617e4
JA
7044 }
7045
cef216fc 7046 trace_io_uring_defer(ctx, req, req->cqe.user_data, req->opcode);
27dc8338 7047 de->req = req;
9cf7c104 7048 de->seq = seq;
27dc8338 7049 list_add_tail(&de->list, &ctx->defer_list);
79ebeaee 7050 spin_unlock(&ctx->completion_lock);
de0617e4
JA
7051}
7052
68fb8979 7053static void io_clean_op(struct io_kiocb *req)
99bc4c38 7054{
8197b053
PB
7055 if (req->flags & REQ_F_BUFFER_SELECTED) {
7056 spin_lock(&req->ctx->completion_lock);
cc3cec83 7057 io_put_kbuf_comp(req);
8197b053
PB
7058 spin_unlock(&req->ctx->completion_lock);
7059 }
99bc4c38 7060
0e1b6fe3
PB
7061 if (req->flags & REQ_F_NEED_CLEANUP) {
7062 switch (req->opcode) {
7063 case IORING_OP_READV:
7064 case IORING_OP_READ_FIXED:
7065 case IORING_OP_READ:
7066 case IORING_OP_WRITEV:
7067 case IORING_OP_WRITE_FIXED:
e8c2bc1f
JA
7068 case IORING_OP_WRITE: {
7069 struct io_async_rw *io = req->async_data;
1dacb4df
PB
7070
7071 kfree(io->free_iovec);
0e1b6fe3 7072 break;
e8c2bc1f 7073 }
0e1b6fe3 7074 case IORING_OP_RECVMSG:
e8c2bc1f
JA
7075 case IORING_OP_SENDMSG: {
7076 struct io_async_msghdr *io = req->async_data;
257e84a5
PB
7077
7078 kfree(io->free_iov);
0e1b6fe3 7079 break;
e8c2bc1f 7080 }
f3cd4850
JA
7081 case IORING_OP_OPENAT:
7082 case IORING_OP_OPENAT2:
7083 if (req->open.filename)
7084 putname(req->open.filename);
7085 break;
80a261fd
JA
7086 case IORING_OP_RENAMEAT:
7087 putname(req->rename.oldpath);
7088 putname(req->rename.newpath);
7089 break;
14a1143b
JA
7090 case IORING_OP_UNLINKAT:
7091 putname(req->unlink.filename);
7092 break;
e34a02dc
DK
7093 case IORING_OP_MKDIRAT:
7094 putname(req->mkdir.filename);
7095 break;
7a8721f8
DK
7096 case IORING_OP_SYMLINKAT:
7097 putname(req->symlink.oldpath);
7098 putname(req->symlink.newpath);
7099 break;
cf30da90
DK
7100 case IORING_OP_LINKAT:
7101 putname(req->hardlink.oldpath);
7102 putname(req->hardlink.newpath);
7103 break;
1b6fe6e0
SR
7104 case IORING_OP_STATX:
7105 if (req->statx.filename)
7106 putname(req->statx.filename);
7107 break;
0e1b6fe3 7108 }
99bc4c38 7109 }
75652a30
JA
7110 if ((req->flags & REQ_F_POLLED) && req->apoll) {
7111 kfree(req->apoll->double_poll);
7112 kfree(req->apoll);
7113 req->apoll = NULL;
7114 }
c854357b 7115 if (req->flags & REQ_F_CREDS)
b8e64b53 7116 put_cred(req->creds);
d886e185
PB
7117 if (req->flags & REQ_F_ASYNC_DATA) {
7118 kfree(req->async_data);
7119 req->async_data = NULL;
7120 }
c854357b 7121 req->flags &= ~IO_REQ_CLEAN_FLAGS;
99bc4c38
PB
7122}
7123
6bf9c47a
JA
7124static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
7125{
7126 if (req->file || !io_op_defs[req->opcode].needs_file)
7127 return true;
7128
7129 if (req->flags & REQ_F_FIXED_FILE)
cef216fc 7130 req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
6bf9c47a 7131 else
cef216fc 7132 req->file = io_file_get_normal(req, req->cqe.fd);
6bf9c47a
JA
7133 if (req->file)
7134 return true;
7135
7136 req_set_fail(req);
cef216fc 7137 req->cqe.res = -EBADF;
6bf9c47a
JA
7138 return false;
7139}
7140
889fca73 7141static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1 7142{
5730b27e 7143 const struct cred *creds = NULL;
d625c6ee 7144 int ret;
2b188cc1 7145
70152140
JA
7146 if (unlikely(!io_assign_file(req, issue_flags)))
7147 return -EBADF;
7148
6878b40e 7149 if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
c10d1f98 7150 creds = override_creds(req->creds);
5730b27e 7151
5bd2182d
PM
7152 if (!io_op_defs[req->opcode].audit_skip)
7153 audit_uring_entry(req->opcode);
7154
d625c6ee 7155 switch (req->opcode) {
2b188cc1 7156 case IORING_OP_NOP:
889fca73 7157 ret = io_nop(req, issue_flags);
2b188cc1
JA
7158 break;
7159 case IORING_OP_READV:
edafccee 7160 case IORING_OP_READ_FIXED:
3a6820f2 7161 case IORING_OP_READ:
889fca73 7162 ret = io_read(req, issue_flags);
edafccee 7163 break;
3529d8c2 7164 case IORING_OP_WRITEV:
edafccee 7165 case IORING_OP_WRITE_FIXED:
3a6820f2 7166 case IORING_OP_WRITE:
889fca73 7167 ret = io_write(req, issue_flags);
2b188cc1 7168 break;
c992fe29 7169 case IORING_OP_FSYNC:
45d189c6 7170 ret = io_fsync(req, issue_flags);
c992fe29 7171 break;
221c5eb2 7172 case IORING_OP_POLL_ADD:
61e98203 7173 ret = io_poll_add(req, issue_flags);
221c5eb2
JA
7174 break;
7175 case IORING_OP_POLL_REMOVE:
c5de0036 7176 ret = io_poll_update(req, issue_flags);
221c5eb2 7177 break;
5d17b4a4 7178 case IORING_OP_SYNC_FILE_RANGE:
45d189c6 7179 ret = io_sync_file_range(req, issue_flags);
5d17b4a4 7180 break;
0fa03c62 7181 case IORING_OP_SENDMSG:
889fca73 7182 ret = io_sendmsg(req, issue_flags);
062d04d7 7183 break;
fddaface 7184 case IORING_OP_SEND:
889fca73 7185 ret = io_send(req, issue_flags);
0fa03c62 7186 break;
aa1fa28f 7187 case IORING_OP_RECVMSG:
889fca73 7188 ret = io_recvmsg(req, issue_flags);
062d04d7 7189 break;
fddaface 7190 case IORING_OP_RECV:
889fca73 7191 ret = io_recv(req, issue_flags);
aa1fa28f 7192 break;
5262f567 7193 case IORING_OP_TIMEOUT:
61e98203 7194 ret = io_timeout(req, issue_flags);
5262f567 7195 break;
11365043 7196 case IORING_OP_TIMEOUT_REMOVE:
61e98203 7197 ret = io_timeout_remove(req, issue_flags);
11365043 7198 break;
17f2fe35 7199 case IORING_OP_ACCEPT:
889fca73 7200 ret = io_accept(req, issue_flags);
17f2fe35 7201 break;
f8e85cf2 7202 case IORING_OP_CONNECT:
889fca73 7203 ret = io_connect(req, issue_flags);
f8e85cf2 7204 break;
62755e35 7205 case IORING_OP_ASYNC_CANCEL:
61e98203 7206 ret = io_async_cancel(req, issue_flags);
62755e35 7207 break;
d63d1b5e 7208 case IORING_OP_FALLOCATE:
45d189c6 7209 ret = io_fallocate(req, issue_flags);
d63d1b5e 7210 break;
15b71abe 7211 case IORING_OP_OPENAT:
45d189c6 7212 ret = io_openat(req, issue_flags);
15b71abe 7213 break;
b5dba59e 7214 case IORING_OP_CLOSE:
889fca73 7215 ret = io_close(req, issue_flags);
b5dba59e 7216 break;
05f3fb3c 7217 case IORING_OP_FILES_UPDATE:
889fca73 7218 ret = io_files_update(req, issue_flags);
05f3fb3c 7219 break;
eddc7ef5 7220 case IORING_OP_STATX:
45d189c6 7221 ret = io_statx(req, issue_flags);
eddc7ef5 7222 break;
4840e418 7223 case IORING_OP_FADVISE:
45d189c6 7224 ret = io_fadvise(req, issue_flags);
4840e418 7225 break;
c1ca757b 7226 case IORING_OP_MADVISE:
45d189c6 7227 ret = io_madvise(req, issue_flags);
c1ca757b 7228 break;
cebdb986 7229 case IORING_OP_OPENAT2:
45d189c6 7230 ret = io_openat2(req, issue_flags);
cebdb986 7231 break;
3e4827b0 7232 case IORING_OP_EPOLL_CTL:
889fca73 7233 ret = io_epoll_ctl(req, issue_flags);
3e4827b0 7234 break;
7d67af2c 7235 case IORING_OP_SPLICE:
45d189c6 7236 ret = io_splice(req, issue_flags);
7d67af2c 7237 break;
ddf0322d 7238 case IORING_OP_PROVIDE_BUFFERS:
889fca73 7239 ret = io_provide_buffers(req, issue_flags);
ddf0322d 7240 break;
067524e9 7241 case IORING_OP_REMOVE_BUFFERS:
889fca73 7242 ret = io_remove_buffers(req, issue_flags);
3e4827b0 7243 break;
f2a8d5c7 7244 case IORING_OP_TEE:
45d189c6 7245 ret = io_tee(req, issue_flags);
f2a8d5c7 7246 break;
36f4fa68 7247 case IORING_OP_SHUTDOWN:
45d189c6 7248 ret = io_shutdown(req, issue_flags);
36f4fa68 7249 break;
80a261fd 7250 case IORING_OP_RENAMEAT:
45d189c6 7251 ret = io_renameat(req, issue_flags);
80a261fd 7252 break;
14a1143b 7253 case IORING_OP_UNLINKAT:
45d189c6 7254 ret = io_unlinkat(req, issue_flags);
14a1143b 7255 break;
e34a02dc
DK
7256 case IORING_OP_MKDIRAT:
7257 ret = io_mkdirat(req, issue_flags);
7258 break;
7a8721f8
DK
7259 case IORING_OP_SYMLINKAT:
7260 ret = io_symlinkat(req, issue_flags);
7261 break;
cf30da90
DK
7262 case IORING_OP_LINKAT:
7263 ret = io_linkat(req, issue_flags);
7264 break;
4f57f06c
JA
7265 case IORING_OP_MSG_RING:
7266 ret = io_msg_ring(req, issue_flags);
7267 break;
2b188cc1
JA
7268 default:
7269 ret = -EINVAL;
7270 break;
7271 }
7272
5bd2182d
PM
7273 if (!io_op_defs[req->opcode].audit_skip)
7274 audit_uring_exit(!ret, ret);
7275
5730b27e
JA
7276 if (creds)
7277 revert_creds(creds);
def596e9
JA
7278 if (ret)
7279 return ret;
b532576e 7280 /* If the op doesn't have a file, we're not polling for it */
9983028e 7281 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file)
9882131c 7282 io_iopoll_req_issued(req, issue_flags);
def596e9
JA
7283
7284 return 0;
2b188cc1
JA
7285}
7286
ebc11b6c
PB
7287static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
7288{
7289 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7290
7291 req = io_put_req_find_next(req);
7292 return req ? &req->work : NULL;
7293}
7294
5280f7e5 7295static void io_wq_submit_work(struct io_wq_work *work)
2b188cc1
JA
7296{
7297 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6bf9c47a 7298 const struct io_op_def *def = &io_op_defs[req->opcode];
d01905db
PB
7299 unsigned int issue_flags = IO_URING_F_UNLOCKED;
7300 bool needs_poll = false;
6df1db6b 7301 struct io_kiocb *timeout;
6bf9c47a 7302 int ret = 0, err = -ECANCELED;
2b188cc1 7303
48dcd38d
PB
7304 /* one will be dropped by ->io_free_work() after returning to io-wq */
7305 if (!(req->flags & REQ_F_REFCOUNT))
7306 __io_req_set_refcount(req, 2);
7307 else
7308 req_ref_get(req);
5d5901a3 7309
6df1db6b
PB
7310 timeout = io_prep_linked_timeout(req);
7311 if (timeout)
7312 io_queue_linked_timeout(timeout);
d4c81f38 7313
6bf9c47a 7314
dadebc35 7315 /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
d01905db 7316 if (work->flags & IO_WQ_WORK_CANCEL) {
0f8da75b 7317fail:
6bf9c47a 7318 io_req_task_queue_fail(req, err);
d01905db
PB
7319 return;
7320 }
0f8da75b
PB
7321 if (!io_assign_file(req, issue_flags)) {
7322 err = -EBADF;
7323 work->flags |= IO_WQ_WORK_CANCEL;
7324 goto fail;
7325 }
31b51510 7326
d01905db 7327 if (req->flags & REQ_F_FORCE_ASYNC) {
afb7f56f
PB
7328 bool opcode_poll = def->pollin || def->pollout;
7329
7330 if (opcode_poll && file_can_poll(req->file)) {
7331 needs_poll = true;
d01905db 7332 issue_flags |= IO_URING_F_NONBLOCK;
afb7f56f 7333 }
561fb04a 7334 }
31b51510 7335
d01905db
PB
7336 do {
7337 ret = io_issue_sqe(req, issue_flags);
7338 if (ret != -EAGAIN)
7339 break;
7340 /*
7341 * We can get EAGAIN for iopolled IO even though we're
7342 * forcing a sync submission from here, since we can't
7343 * wait for request slots on the block side.
7344 */
7345 if (!needs_poll) {
7346 cond_resched();
7347 continue;
90fa0288
HX
7348 }
7349
4d9237e3 7350 if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
d01905db
PB
7351 return;
7352 /* aborted or ready, in either case retry blocking */
7353 needs_poll = false;
7354 issue_flags &= ~IO_URING_F_NONBLOCK;
7355 } while (1);
31b51510 7356
a3df7698 7357 /* avoid locking problems by failing it from a clean context */
5d5901a3 7358 if (ret)
a3df7698 7359 io_req_task_queue_fail(req, ret);
2b188cc1
JA
7360}
7361
aeca241b 7362static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
042b0d85 7363 unsigned i)
65e19f54 7364{
042b0d85 7365 return &table->files[i];
dafecf19
PB
7366}
7367
65e19f54
JA
7368static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
7369 int index)
7370{
aeca241b 7371 struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
65e19f54 7372
a04b0ac0 7373 return (struct file *) (slot->file_ptr & FFS_MASK);
65e19f54
JA
7374}
7375
a04b0ac0 7376static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
9a321c98
PB
7377{
7378 unsigned long file_ptr = (unsigned long) file;
7379
88459b50 7380 file_ptr |= io_file_get_flags(file);
a04b0ac0 7381 file_slot->file_ptr = file_ptr;
65e19f54
JA
7382}
7383
5106dd6e
JA
7384static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
7385 unsigned int issue_flags)
09bb8394 7386{
5106dd6e
JA
7387 struct io_ring_ctx *ctx = req->ctx;
7388 struct file *file = NULL;
ac177053 7389 unsigned long file_ptr;
09bb8394 7390
5106dd6e
JA
7391 if (issue_flags & IO_URING_F_UNLOCKED)
7392 mutex_lock(&ctx->uring_lock);
7393
ac177053 7394 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
5106dd6e 7395 goto out;
ac177053
PB
7396 fd = array_index_nospec(fd, ctx->nr_user_files);
7397 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
7398 file = (struct file *) (file_ptr & FFS_MASK);
7399 file_ptr &= ~FFS_MASK;
7400 /* mask in overlapping REQ_F and FFS bits */
35645ac3 7401 req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
5106dd6e
JA
7402 io_req_set_rsrc_node(req, ctx, 0);
7403out:
7404 if (issue_flags & IO_URING_F_UNLOCKED)
7405 mutex_unlock(&ctx->uring_lock);
ac177053
PB
7406 return file;
7407}
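
/*
 * Illustrative userspace sketch (not part of this file): registering a file
 * and issuing I/O against its slot, so the lookup goes through
 * io_file_get_fixed() above instead of a per-request fget().  Assumes
 * liburing helpers; error handling is elided.
 */
#include <liburing.h>

static int read_via_fixed_slot(struct io_uring *ring, int fd, void *buf,
			       unsigned int len)
{
	struct io_uring_sqe *sqe;
	int fds[1] = { fd };

	if (io_uring_register_files(ring, fds, 1))	/* slot 0 := fd */
		return -1;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, 0 /* slot index, not an fd */, buf, len, 0);
	sqe->flags |= IOSQE_FIXED_FILE;
	return io_uring_submit(ring);
}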
d44f554e 7408
d5361233
JA
7409/*
 7410 * Drop the file for requeue operations. Only used if req->file is the
7411 * io_uring descriptor itself.
7412 */
7413static void io_drop_inflight_file(struct io_kiocb *req)
7414{
7415 if (unlikely(req->flags & REQ_F_INFLIGHT)) {
7416 fput(req->file);
7417 req->file = NULL;
7418 req->flags &= ~REQ_F_INFLIGHT;
7419 }
7420}
7421
5106dd6e 7422static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
ac177053 7423{
62906e89 7424 struct file *file = fget(fd);
ac177053 7425
cef216fc 7426 trace_io_uring_file_get(req->ctx, req, req->cqe.user_data, fd);
09bb8394 7427
ac177053 7428 /* we don't allow fixed io_uring files */
d5361233
JA
7429 if (file && file->f_op == &io_uring_fops)
7430 req->flags |= REQ_F_INFLIGHT;
8371adf5 7431 return file;
09bb8394
JA
7432}
7433
f237c30a 7434static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
89b263f6
JA
7435{
7436 struct io_kiocb *prev = req->timeout.prev;
617a8948 7437 int ret = -ENOENT;
89b263f6
JA
7438
7439 if (prev) {
617a8948 7440 if (!(req->task->flags & PF_EXITING))
cef216fc 7441 ret = io_try_cancel_userdata(req, prev->cqe.user_data);
505657bc 7442 io_req_complete_post(req, ret ?: -ETIME, 0);
89b263f6 7443 io_put_req(prev);
89b263f6
JA
7444 } else {
7445 io_req_complete_post(req, -ETIME, 0);
7446 }
7447}
7448
2665abfd 7449static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2b188cc1 7450{
ad8a48ac
JA
7451 struct io_timeout_data *data = container_of(timer,
7452 struct io_timeout_data, timer);
90cd7e42 7453 struct io_kiocb *prev, *req = data->req;
2665abfd 7454 struct io_ring_ctx *ctx = req->ctx;
2665abfd 7455 unsigned long flags;
2665abfd 7456
89b263f6 7457 spin_lock_irqsave(&ctx->timeout_lock, flags);
90cd7e42
PB
7458 prev = req->timeout.head;
7459 req->timeout.head = NULL;
2665abfd
JA
7460
7461 /*
7462 * We don't expect the list to be empty, that will only happen if we
7463 * race with the completion of the linked work.
7464 */
447c19f3 7465 if (prev) {
f2f87370 7466 io_remove_next_linked(prev);
447c19f3
PB
7467 if (!req_ref_inc_not_zero(prev))
7468 prev = NULL;
7469 }
ef9dd637 7470 list_del(&req->timeout.list);
89b263f6
JA
7471 req->timeout.prev = prev;
7472 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
2665abfd 7473
89b263f6 7474 req->io_task_work.func = io_req_task_link_timeout;
4813c377 7475 io_req_task_work_add(req, false);
2665abfd
JA
7476 return HRTIMER_NORESTART;
7477}
7478
de968c18 7479static void io_queue_linked_timeout(struct io_kiocb *req)
2665abfd 7480{
de968c18
PB
7481 struct io_ring_ctx *ctx = req->ctx;
7482
89b263f6 7483 spin_lock_irq(&ctx->timeout_lock);
76a46e06 7484 /*
f2f87370
PB
7485 * If the back reference is NULL, then our linked request finished
7486 * before we got a chance to setup the timer
76a46e06 7487 */
90cd7e42 7488 if (req->timeout.head) {
e8c2bc1f 7489 struct io_timeout_data *data = req->async_data;
94ae5e77 7490
ad8a48ac
JA
7491 data->timer.function = io_link_timeout_fn;
7492 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
7493 data->mode);
ef9dd637 7494 list_add_tail(&req->timeout.list, &ctx->ltimeout_list);
2665abfd 7495 }
89b263f6 7496 spin_unlock_irq(&ctx->timeout_lock);
2665abfd 7497 /* drop submission reference */
76a46e06
JA
7498 io_put_req(req);
7499}
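
/*
 * Illustrative userspace sketch (not part of this file): attaching a linked
 * timeout to a request.  The IORING_OP_LINK_TIMEOUT SQE directly follows the
 * SQE it guards, which carries IOSQE_IO_LINK; the kernel-side arming is
 * io_queue_linked_timeout()/io_link_timeout_fn() above.  Assumes liburing's
 * io_uring_prep_link_timeout() helper.
 */
#include <liburing.h>

static void read_with_500ms_deadline(struct io_uring *ring, int fd,
				     void *buf, unsigned int len)
{
	struct __kernel_timespec ts = { .tv_nsec = 500 * 1000 * 1000 };
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, buf, len, 0);
	sqe->flags |= IOSQE_IO_LINK;	/* link to the timeout below */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);

	io_uring_submit(ring);
	/* a read still pending after 500ms completes with -ECANCELED */
}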
2665abfd 7500
d475a9a6
PB
7501static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
7502 __must_hold(&req->ctx->uring_lock)
7503{
7504 struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
7505
4d9237e3 7506 switch (io_arm_poll_handler(req, 0)) {
d475a9a6 7507 case IO_APOLL_READY:
d475a9a6
PB
7508 io_req_task_queue(req);
7509 break;
7510 case IO_APOLL_ABORTED:
7511 /*
7512 * Queued up for async execution, worker will release
7513 * submit reference when the iocb is actually submitted.
7514 */
7515 io_queue_async_work(req, NULL);
7516 break;
b1c62645 7517 case IO_APOLL_OK:
b1c62645 7518 break;
d475a9a6
PB
7519 }
7520
7521 if (linked_timeout)
7522 io_queue_linked_timeout(linked_timeout);
7523}
7524
7525static inline void __io_queue_sqe(struct io_kiocb *req)
282cdc86 7526 __must_hold(&req->ctx->uring_lock)
2b188cc1 7527{
906c6caa 7528 struct io_kiocb *linked_timeout;
e0c5c576 7529 int ret;
2b188cc1 7530
c5eef2b9 7531 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
193155c8 7532
fff4e40e
PB
7533 if (req->flags & REQ_F_COMPLETE_INLINE) {
7534 io_req_add_compl_list(req);
d9f9d284 7535 return;
fff4e40e 7536 }
491381ce
JA
7537 /*
7538 * We async punt it if the file wasn't marked NOWAIT, or if the file
7539 * doesn't support non-blocking read/write attempts
7540 */
1840038e 7541 if (likely(!ret)) {
906c6caa
PB
7542 linked_timeout = io_prep_linked_timeout(req);
7543 if (linked_timeout)
7544 io_queue_linked_timeout(linked_timeout);
1840038e 7545 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
d475a9a6 7546 io_queue_sqe_arm_apoll(req);
0d63c148 7547 } else {
f41db273 7548 io_req_complete_failed(req, ret);
9e645e11 7549 }
2b188cc1
JA
7550}
7551
4652fe3f 7552static void io_queue_sqe_fallback(struct io_kiocb *req)
282cdc86 7553 __must_hold(&req->ctx->uring_lock)
4fe2c963 7554{
4652fe3f 7555 if (req->flags & REQ_F_FAIL) {
c6d3d9cb 7556 io_req_complete_fail_submit(req);
e0eb71dc
PB
7557 } else if (unlikely(req->ctx->drain_active)) {
7558 io_drain_req(req);
76cc33d7
PB
7559 } else {
7560 int ret = io_req_prep_async(req);
7561
7562 if (unlikely(ret))
7563 io_req_complete_failed(req, ret);
7564 else
f237c30a 7565 io_queue_async_work(req, NULL);
ce35a47a 7566 }
4fe2c963
JL
7567}
7568
4652fe3f
PB
7569static inline void io_queue_sqe(struct io_kiocb *req)
7570 __must_hold(&req->ctx->uring_lock)
7571{
7572 if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))))
7573 __io_queue_sqe(req);
7574 else
7575 io_queue_sqe_fallback(req);
7576}
7577
b16fed66
PB
7578/*
7579 * Check SQE restrictions (opcode and flags).
7580 *
7581 * Returns 'true' if SQE is allowed, 'false' otherwise.
7582 */
7583static inline bool io_check_restriction(struct io_ring_ctx *ctx,
7584 struct io_kiocb *req,
7585 unsigned int sqe_flags)
4fe2c963 7586{
b16fed66
PB
7587 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
7588 return false;
7589
7590 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
7591 ctx->restrictions.sqe_flags_required)
7592 return false;
7593
7594 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
7595 ctx->restrictions.sqe_flags_required))
7596 return false;
7597
7598 return true;
4fe2c963
JL
7599}
7600
22b2ca31
PB
7601static void io_init_req_drain(struct io_kiocb *req)
7602{
7603 struct io_ring_ctx *ctx = req->ctx;
7604 struct io_kiocb *head = ctx->submit_state.link.head;
7605
7606 ctx->drain_active = true;
7607 if (head) {
7608 /*
7609 * If we need to drain a request in the middle of a link, drain
7610 * the head request and the next request/link after the current
7611 * link. Considering sequential execution of links,
b6c7db32 7612 * REQ_F_IO_DRAIN will be maintained for every request of our
22b2ca31
PB
7613 * link.
7614 */
b6c7db32 7615 head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
22b2ca31
PB
7616 ctx->drain_next = true;
7617 }
7618}
7619
b16fed66
PB
7620static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
7621 const struct io_uring_sqe *sqe)
282cdc86 7622 __must_hold(&ctx->uring_lock)
b16fed66 7623{
b16fed66 7624 unsigned int sqe_flags;
fc0ae024 7625 int personality;
4a04d1d1 7626 u8 opcode;
b16fed66 7627
864ea921 7628 /* req is partially pre-initialised, see io_preinit_req() */
4a04d1d1 7629 req->opcode = opcode = READ_ONCE(sqe->opcode);
b16fed66
PB
7630 /* same numerical values with corresponding REQ_F_*, safe to copy */
7631 req->flags = sqe_flags = READ_ONCE(sqe->flags);
cef216fc 7632 req->cqe.user_data = READ_ONCE(sqe->user_data);
b16fed66 7633 req->file = NULL;
b16fed66 7634 req->fixed_rsrc_refs = NULL;
b16fed66 7635 req->task = current;
b16fed66 7636
4a04d1d1
PB
7637 if (unlikely(opcode >= IORING_OP_LAST)) {
7638 req->opcode = 0;
b16fed66 7639 return -EINVAL;
4a04d1d1 7640 }
68fe256a
PB
7641 if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
7642 /* enforce forwards compatibility on users */
7643 if (sqe_flags & ~SQE_VALID_FLAGS)
7644 return -EINVAL;
7645 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
4a04d1d1 7646 !io_op_defs[opcode].buffer_select)
68fe256a 7647 return -EOPNOTSUPP;
5562a8d7
PB
7648 if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
7649 ctx->drain_disabled = true;
7650 if (sqe_flags & IOSQE_IO_DRAIN) {
7651 if (ctx->drain_disabled)
7652 return -EOPNOTSUPP;
22b2ca31 7653 io_init_req_drain(req);
5562a8d7 7654 }
2a56a9bd
PB
7655 }
7656 if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
7657 if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
7658 return -EACCES;
7659 /* knock it to the slow queue path, will be drained there */
7660 if (ctx->drain_active)
7661 req->flags |= REQ_F_FORCE_ASYNC;
7662 /* if there is no link, we're at "next" request and need to drain */
7663 if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
7664 ctx->drain_next = false;
7665 ctx->drain_active = true;
b6c7db32 7666 req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2a56a9bd 7667 }
68fe256a 7668 }
b16fed66 7669
4a04d1d1 7670 if (io_op_defs[opcode].needs_file) {
6d63416d
PB
7671 struct io_submit_state *state = &ctx->submit_state;
7672
cef216fc 7673 req->cqe.fd = READ_ONCE(sqe->fd);
6bf9c47a 7674
6d63416d
PB
7675 /*
 7676 * Plug now if we have more than 2 IOs left after this, and the
 7677 * target is potentially a read/write to block-based storage.
7678 */
4a04d1d1 7679 if (state->need_plug && io_op_defs[opcode].plug) {
6d63416d
PB
7680 state->plug_started = true;
7681 state->need_plug = false;
5ca7a8b3 7682 blk_start_plug_nr_ios(&state->plug, state->submit_nr);
6d63416d 7683 }
b16fed66 7684 }
863e0560 7685
003e8dcc
JA
7686 personality = READ_ONCE(sqe->personality);
7687 if (personality) {
cdab10bf
LT
7688 int ret;
7689
c10d1f98
PB
7690 req->creds = xa_load(&ctx->personalities, personality);
7691 if (!req->creds)
003e8dcc 7692 return -EINVAL;
c10d1f98 7693 get_cred(req->creds);
cdc1404a
PM
7694 ret = security_uring_override_creds(req->creds);
7695 if (ret) {
7696 put_cred(req->creds);
7697 return ret;
7698 }
b8e64b53 7699 req->flags |= REQ_F_CREDS;
003e8dcc 7700 }
b16fed66 7701
fc0ae024 7702 return io_req_prep(req, sqe);
b16fed66
PB
7703}
7704
a6b8cadc 7705static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
a1ab7b35 7706 const struct io_uring_sqe *sqe)
282cdc86 7707 __must_hold(&ctx->uring_lock)
9e645e11 7708{
a1ab7b35 7709 struct io_submit_link *link = &ctx->submit_state.link;
ef4ff581 7710 int ret;
9e645e11 7711
a6b8cadc
PB
7712 ret = io_init_req(ctx, req, sqe);
7713 if (unlikely(ret)) {
502c87d6 7714 trace_io_uring_req_failed(sqe, ctx, req, ret);
a87acfde 7715
a8295b98 7716 /* fail even hard links since we don't submit */
de59bc10 7717 if (link->head) {
a8295b98
HX
7718 /*
 7719 * We can judge whether a link req failed or was cancelled by whether
 7720 * REQ_F_FAIL is set, but the head is an exception, since it may have
 7721 * REQ_F_FAIL set because of another req's failure. So leverage
cef216fc 7722 * req->cqe.res to distinguish whether a head has REQ_F_FAIL set
a8295b98
HX
 7723 * because of its own failure or another req's failure, so that we can
 7724 * set the correct ret code for it. Init the result here to avoid
 7725 * affecting the normal path.
7726 */
7727 if (!(link->head->flags & REQ_F_FAIL))
7728 req_fail_link_node(link->head, -ECANCELED);
7729 } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
7730 /*
 7731 * the current req is a normal req, so we should return an
 7732 * error and thus break the submission loop.
7733 */
7734 io_req_complete_failed(req, ret);
7735 return ret;
de59bc10 7736 }
a8295b98 7737 req_fail_link_node(req, ret);
a6b8cadc 7738 }
441b8a78 7739
be7053b7 7740 /* don't need @sqe from now on */
cef216fc 7741 trace_io_uring_submit_sqe(ctx, req, req->cqe.user_data, req->opcode,
236daeae
OL
7742 req->flags, true,
7743 ctx->flags & IORING_SETUP_SQPOLL);
a6b8cadc 7744
9e645e11
JA
7745 /*
7746 * If we already have a head request, queue this one for async
7747 * submittal once the head completes. If we don't have a head but
7748 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
7749 * submitted sync once the chain is complete. If none of those
7750 * conditions are true (normal request), then just queue it.
7751 */
863e0560
PB
7752 if (link->head) {
7753 struct io_kiocb *head = link->head;
4e88d6e7 7754
a8295b98
HX
7755 if (!(req->flags & REQ_F_FAIL)) {
7756 ret = io_req_prep_async(req);
7757 if (unlikely(ret)) {
7758 req_fail_link_node(req, ret);
7759 if (!(head->flags & REQ_F_FAIL))
7760 req_fail_link_node(head, -ECANCELED);
7761 }
7762 }
9d76377f 7763 trace_io_uring_link(ctx, req, head);
f2f87370 7764 link->last->link = req;
863e0560 7765 link->last = req;
32fe525b 7766
f15a3431
PB
7767 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
7768 return 0;
32fe525b 7769 /* last request of a link, enqueue the link */
f15a3431
PB
7770 link->head = NULL;
7771 req = head;
7772 } else if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
7773 link->head = req;
7774 link->last = req;
7775 return 0;
9e645e11 7776 }
2e6e1fde 7777
f15a3431 7778 io_queue_sqe(req);
1d4240cc 7779 return 0;
9e645e11
JA
7780}
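
/*
 * Illustrative userspace sketch (not part of this file): building a link
 * chain.  Each SQE flagged IOSQE_IO_LINK holds back the next SQE until it
 * completes; io_submit_sqe() above grows link->head/link->last per member
 * and only queues the chain once the last, unflagged SQE is seen.  Assumes
 * liburing helpers.
 */
#include <liburing.h>

static void write_then_fsync(struct io_uring *ring, int fd,
			     const void *buf, unsigned int len)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_write(sqe, fd, buf, len, 0);
	sqe->flags |= IOSQE_IO_LINK;	/* fsync only starts after the write */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_fsync(sqe, fd, 0);	/* last in chain: no link flag */

	io_uring_submit(ring);
}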
7781
9a56a232
JA
7782/*
7783 * Batched submission is done, ensure local IO is flushed out.
7784 */
553deffd 7785static void io_submit_state_end(struct io_ring_ctx *ctx)
9a56a232 7786{
553deffd
PB
7787 struct io_submit_state *state = &ctx->submit_state;
7788
e126391c
PB
7789 if (unlikely(state->link.head))
7790 io_queue_sqe_fallback(state->link.head);
553deffd 7791 /* flush only after queuing links as they can generate completions */
c450178d 7792 io_submit_flush_completions(ctx);
27926b68
JA
7793 if (state->plug_started)
7794 blk_finish_plug(&state->plug);
9a56a232
JA
7795}
7796
7797/*
7798 * Start submission side cache.
7799 */
7800static void io_submit_state_start(struct io_submit_state *state,
ba88ff11 7801 unsigned int max_ios)
9a56a232 7802{
27926b68 7803 state->plug_started = false;
4b628aeb 7804 state->need_plug = max_ios > 2;
5ca7a8b3 7805 state->submit_nr = max_ios;
a1ab7b35
PB
7806 /* set only head, no need to init link_last in advance */
7807 state->link.head = NULL;
9a56a232
JA
7808}
7809
2b188cc1
JA
7810static void io_commit_sqring(struct io_ring_ctx *ctx)
7811{
75b28aff 7812 struct io_rings *rings = ctx->rings;
2b188cc1 7813
caf582c6
PB
7814 /*
7815 * Ensure any loads from the SQEs are done at this point,
7816 * since once we write the new head, the application could
7817 * write new data to them.
7818 */
7819 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
7820}
7821
2b188cc1 7822/*
dd9ae8a0 7823 * Fetch an sqe, if one is available. Note this returns a pointer to memory
2b188cc1
JA
7824 * that is mapped by userspace. This means that care needs to be taken to
7825 * ensure that reads are stable, as we cannot rely on userspace always
7826 * being a good citizen. If members of the sqe are validated and then later
7827 * used, it's important that those reads are done through READ_ONCE() to
7828 * prevent a re-load down the line.
7829 */
709b302f 7830static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2b188cc1 7831{
ea5ab3b5 7832 unsigned head, mask = ctx->sq_entries - 1;
17d3aeb3 7833 unsigned sq_idx = ctx->cached_sq_head++ & mask;
2b188cc1
JA
7834
7835 /*
7836 * The cached sq head (or cq tail) serves two purposes:
7837 *
7838 * 1) allows us to batch the cost of updating the user visible
7839 * head updates.
7840 * 2) allows the kernel side to track the head on its own, even
7841 * though the application is the one updating it.
7842 */
17d3aeb3 7843 head = READ_ONCE(ctx->sq_array[sq_idx]);
709b302f
PB
7844 if (likely(head < ctx->sq_entries))
7845 return &ctx->sq_sqes[head];
2b188cc1
JA
7846
7847 /* drop invalid entries */
15641e42
PB
7848 ctx->cq_extra--;
7849 WRITE_ONCE(ctx->rings->sq_dropped,
7850 READ_ONCE(ctx->rings->sq_dropped) + 1);
709b302f
PB
7851 return NULL;
7852}
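
/*
 * Minimal model (illustrative only) of the producer side consumed by
 * io_get_sqe() above: the application stores an SQE index into the SQ index
 * array slot (tail & mask) and then publishes the new tail.  The names and
 * layout below are hypothetical, not the real uapi structures.
 */
#include <stdatomic.h>

struct toy_sq {
	unsigned int *array;		/* maps ring slots to SQE indices */
	_Atomic unsigned int *ktail;	/* tail word shared with the consumer */
	unsigned int mask;		/* ring_entries - 1 */
	unsigned int local_tail;	/* private copy, not yet published */
};

static void toy_sq_push(struct toy_sq *sq, unsigned int sqe_index)
{
	sq->array[sq->local_tail & sq->mask] = sqe_index;
	sq->local_tail++;
	/* release store: the array write is visible before the new tail */
	atomic_store_explicit(sq->ktail, sq->local_tail, memory_order_release);
}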
7853
0f212204 7854static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
282cdc86 7855 __must_hold(&ctx->uring_lock)
6c271ce2 7856{
69629809 7857 unsigned int entries = io_sqring_entries(ctx);
46c4e16a 7858 int submitted = 0;
6c271ce2 7859
51d48dab 7860 if (unlikely(!entries))
69629809 7861 return 0;
ee7d46d9 7862 /* make sure SQ entry isn't read before tail */
69629809 7863 nr = min3(nr, ctx->sq_entries, entries);
9a10867a 7864 io_get_task_refs(nr);
6c271ce2 7865
ba88ff11 7866 io_submit_state_start(&ctx->submit_state, nr);
69629809 7867 do {
3529d8c2 7868 const struct io_uring_sqe *sqe;
196be95c 7869 struct io_kiocb *req;
fb5ccc98 7870
a33ae9ce 7871 if (unlikely(!io_alloc_req_refill(ctx))) {
196be95c
PB
7872 if (!submitted)
7873 submitted = -EAGAIN;
fb5ccc98 7874 break;
196be95c 7875 }
a33ae9ce 7876 req = io_alloc_req(ctx);
4fccfcbb
PB
7877 sqe = io_get_sqe(ctx);
7878 if (unlikely(!sqe)) {
fa05457a 7879 io_req_add_to_cache(req, ctx);
4fccfcbb
PB
7880 break;
7881 }
d3656344
JA
7882 /* will complete beyond this point, count as submitted */
7883 submitted++;
bcbb7bf6
JA
7884 if (io_submit_sqe(ctx, req, sqe)) {
7885 /*
7886 * Continue submitting even for sqe failure if the
7887 * ring was setup with IORING_SETUP_SUBMIT_ALL
7888 */
7889 if (!(ctx->flags & IORING_SETUP_SUBMIT_ALL))
7890 break;
7891 }
69629809 7892 } while (submitted < nr);
6c271ce2 7893
9466f437
PB
7894 if (unlikely(submitted != nr)) {
7895 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
d8a6df10 7896 int unused = nr - ref_used;
9466f437 7897
09899b19 7898 current->io_uring->cached_refs += unused;
9466f437 7899 }
6c271ce2 7900
553deffd 7901 io_submit_state_end(ctx);
ae9428ca
PB
7902 /* Commit SQ ring head once we've consumed and submitted all SQEs */
7903 io_commit_sqring(ctx);
7904
6c271ce2
JA
7905 return submitted;
7906}
7907
e4b6d902
PB
7908static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
7909{
7910 return READ_ONCE(sqd->state);
7911}
7912
23b3628e
XW
7913static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
7914{
7915 /* Tell userspace we may need a wakeup call */
79ebeaee 7916 spin_lock(&ctx->completion_lock);
20c0b380
NA
7917 WRITE_ONCE(ctx->rings->sq_flags,
7918 ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
79ebeaee 7919 spin_unlock(&ctx->completion_lock);
23b3628e
XW
7920}
7921
7922static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
7923{
79ebeaee 7924 spin_lock(&ctx->completion_lock);
20c0b380
NA
7925 WRITE_ONCE(ctx->rings->sq_flags,
7926 ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
79ebeaee 7927 spin_unlock(&ctx->completion_lock);
23b3628e
XW
7928}
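
/*
 * Illustrative userspace sketch (not part of this file): with
 * IORING_SETUP_SQPOLL, io_ring_set_wakeup_flag() above publishes
 * IORING_SQ_NEED_WAKEUP in the shared SQ flags once the poll thread idles,
 * and the application nudges it with IORING_ENTER_SQ_WAKEUP.  'sq_flags' is
 * a hypothetical pointer into the application's mmap of the SQ ring; the
 * raw syscall use assumes __NR_io_uring_enter is available.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static void sqpoll_wake_if_needed(int ring_fd, const unsigned int *sq_flags)
{
	if (*(volatile const unsigned int *)sq_flags & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}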
7929
08369246 7930static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
6c271ce2 7931{
c8d1ba58 7932 unsigned int to_submit;
bdcd3eab 7933 int ret = 0;
6c271ce2 7934
c8d1ba58 7935 to_submit = io_sqring_entries(ctx);
e95eee2d 7936 /* if we're handling multiple rings, cap submit size for fairness */
4ce8ad95
OL
7937 if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
7938 to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
e95eee2d 7939
5eef4e87 7940 if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
948e1947
PB
7941 const struct cred *creds = NULL;
7942
7943 if (ctx->sq_creds != current_cred())
7944 creds = override_creds(ctx->sq_creds);
a4c0b3de 7945
c8d1ba58 7946 mutex_lock(&ctx->uring_lock);
5eef4e87 7947 if (!wq_list_empty(&ctx->iopoll_list))
5ba3c874 7948 io_do_iopoll(ctx, true);
906a3c6f 7949
3b763ba1
PB
7950 /*
7951 * Don't submit if refs are dying, good for io_uring_register(),
7952 * but also it is relied upon by io_ring_exit_work()
7953 */
0298ef96
PB
7954 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
7955 !(ctx->flags & IORING_SETUP_R_DISABLED))
08369246 7956 ret = io_submit_sqes(ctx, to_submit);
c8d1ba58 7957 mutex_unlock(&ctx->uring_lock);
cb318216 7958
acfb381d
PB
7959 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
7960 wake_up(&ctx->sqo_sq_wait);
948e1947
PB
7961 if (creds)
7962 revert_creds(creds);
acfb381d 7963 }
6c271ce2 7964
08369246
XW
7965 return ret;
7966}
6c271ce2 7967
c072481d 7968static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
08369246
XW
7969{
7970 struct io_ring_ctx *ctx;
7971 unsigned sq_thread_idle = 0;
6c271ce2 7972
c9dca27d
PB
7973 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7974 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
08369246 7975 sqd->sq_thread_idle = sq_thread_idle;
c8d1ba58 7976}
6c271ce2 7977
e4b6d902
PB
7978static bool io_sqd_handle_event(struct io_sq_data *sqd)
7979{
7980 bool did_sig = false;
7981 struct ksignal ksig;
7982
7983 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
7984 signal_pending(current)) {
7985 mutex_unlock(&sqd->lock);
7986 if (signal_pending(current))
7987 did_sig = get_signal(&ksig);
7988 cond_resched();
7989 mutex_lock(&sqd->lock);
7990 }
e4b6d902
PB
7991 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
7992}
7993
c8d1ba58
JA
7994static int io_sq_thread(void *data)
7995{
69fb2131
JA
7996 struct io_sq_data *sqd = data;
7997 struct io_ring_ctx *ctx;
a0d9205f 7998 unsigned long timeout = 0;
37d1e2e3 7999 char buf[TASK_COMM_LEN];
08369246 8000 DEFINE_WAIT(wait);
6c271ce2 8001
696ee88a 8002 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
37d1e2e3 8003 set_task_comm(current, buf);
37d1e2e3
JA
8004
8005 if (sqd->sq_cpu != -1)
8006 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
8007 else
8008 set_cpus_allowed_ptr(current, cpu_online_mask);
8009 current->flags |= PF_NO_SETAFFINITY;
8010
5bd2182d
PM
8011 audit_alloc_kernel(current);
8012
09a6f4ef 8013 mutex_lock(&sqd->lock);
e4b6d902 8014 while (1) {
1a924a80 8015 bool cap_entries, sqt_spin = false;
c1edbf5f 8016
e4b6d902
PB
8017 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
8018 if (io_sqd_handle_event(sqd))
c7d95613 8019 break;
08369246
XW
8020 timeout = jiffies + sqd->sq_thread_idle;
8021 }
e4b6d902 8022
e95eee2d 8023 cap_entries = !list_is_singular(&sqd->ctx_list);
69fb2131 8024 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
948e1947 8025 int ret = __io_sq_thread(ctx, cap_entries);
7c30f36a 8026
5eef4e87 8027 if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
08369246 8028 sqt_spin = true;
69fb2131 8029 }
dd432ea5
PB
8030 if (io_run_task_work())
8031 sqt_spin = true;
6c271ce2 8032
08369246 8033 if (sqt_spin || !time_after(jiffies, timeout)) {
c8d1ba58 8034 cond_resched();
08369246
XW
8035 if (sqt_spin)
8036 timeout = jiffies + sqd->sq_thread_idle;
8037 continue;
8038 }
8039
08369246 8040 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
7f62d40d 8041 if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
1a924a80
PB
8042 bool needs_sched = true;
8043
724cb4f9 8044 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
aaa9f0f4
PB
8045 io_ring_set_wakeup_flag(ctx);
8046
724cb4f9 8047 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
5eef4e87 8048 !wq_list_empty(&ctx->iopoll_list)) {
724cb4f9
HX
8049 needs_sched = false;
8050 break;
8051 }
649bb75d
AK
8052
8053 /*
8054 * Ensure the store of the wakeup flag is not
8055 * reordered with the load of the SQ tail
8056 */
8057 smp_mb();
8058
724cb4f9
HX
8059 if (io_sqring_entries(ctx)) {
8060 needs_sched = false;
8061 break;
8062 }
8063 }
8064
8065 if (needs_sched) {
8066 mutex_unlock(&sqd->lock);
8067 schedule();
8068 mutex_lock(&sqd->lock);
8069 }
69fb2131
JA
8070 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
8071 io_ring_clear_wakeup_flag(ctx);
6c271ce2 8072 }
08369246
XW
8073
8074 finish_wait(&sqd->wait, &wait);
8075 timeout = jiffies + sqd->sq_thread_idle;
6c271ce2 8076 }
28cea78a 8077
78cc687b 8078 io_uring_cancel_generic(true, sqd);
37d1e2e3 8079 sqd->thread = NULL;
05962f95 8080 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
5f3f26f9 8081 io_ring_set_wakeup_flag(ctx);
521d6a73 8082 io_run_task_work();
734551df
PB
8083 mutex_unlock(&sqd->lock);
8084
5bd2182d
PM
8085 audit_free(current);
8086
37d1e2e3
JA
8087 complete(&sqd->exited);
8088 do_exit(0);
6c271ce2
JA
8089}
8090
bda52162
JA
8091struct io_wait_queue {
8092 struct wait_queue_entry wq;
8093 struct io_ring_ctx *ctx;
5fd46178 8094 unsigned cq_tail;
bda52162
JA
8095 unsigned nr_timeouts;
8096};
8097
6c503150 8098static inline bool io_should_wake(struct io_wait_queue *iowq)
bda52162
JA
8099{
8100 struct io_ring_ctx *ctx = iowq->ctx;
5fd46178 8101 int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;
bda52162
JA
8102
8103 /*
d195a66e 8104 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
8105 * started waiting. For timeouts, we always want to return to userspace,
8106 * regardless of event count.
8107 */
5fd46178 8108 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
bda52162
JA
8109}
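
/*
 * Minimal model (illustrative only) of the wrap-safe comparison in
 * io_should_wake() above: the signed difference of two unsigned sequence
 * counters keeps "tail has reached target" correct across u32 overflow,
 * e.g. target = 0xfffffffe and tail = 2 give dist = 4 >= 0.
 */
static int toy_tail_reached(unsigned int tail, unsigned int target)
{
	int dist = tail - (int)target;	/* signed distance, tolerates wrap */

	return dist >= 0;
}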
8110
8111static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
8112 int wake_flags, void *key)
8113{
8114 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
8115 wq);
8116
6c503150
PB
8117 /*
8118 * Cannot safely flush overflowed CQEs from here, ensure we wake up
8119 * the task, and the next invocation will do it.
8120 */
5ed7a37d 8121 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
6c503150
PB
8122 return autoremove_wake_function(curr, mode, wake_flags, key);
8123 return -1;
bda52162
JA
8124}
8125
af9c1a44
JA
8126static int io_run_task_work_sig(void)
8127{
8128 if (io_run_task_work())
8129 return 1;
0b8cfa97 8130 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
792ee0f6 8131 return -ERESTARTSYS;
c5020bc8
OL
8132 if (task_sigpending(current))
8133 return -EINTR;
8134 return 0;
af9c1a44
JA
8135}
8136
eeb60b9a
PB
8137/* when returns >0, the caller should retry */
8138static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
8139 struct io_wait_queue *iowq,
22833966 8140 ktime_t timeout)
eeb60b9a
PB
8141{
8142 int ret;
8143
8144 /* make sure we run task_work before checking for signals */
8145 ret = io_run_task_work_sig();
8146 if (ret || io_should_wake(iowq))
8147 return ret;
8148 /* let the caller flush overflows, retry */
5ed7a37d 8149 if (test_bit(0, &ctx->check_cq_overflow))
eeb60b9a
PB
8150 return 1;
8151
22833966
JA
8152 if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
8153 return -ETIME;
8154 return 1;
eeb60b9a
PB
8155}
8156
2b188cc1
JA
8157/*
8158 * Wait until events become available, if we don't already have some. The
8159 * application must reap them itself, as they reside on the shared cq ring.
8160 */
8161static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
c73ebb68
HX
8162 const sigset_t __user *sig, size_t sigsz,
8163 struct __kernel_timespec __user *uts)
2b188cc1 8164{
90291099 8165 struct io_wait_queue iowq;
75b28aff 8166 struct io_rings *rings = ctx->rings;
22833966 8167 ktime_t timeout = KTIME_MAX;
c1d5a224 8168 int ret;
2b188cc1 8169
b41e9852 8170 do {
90f67366 8171 io_cqring_overflow_flush(ctx);
6c503150 8172 if (io_cqring_events(ctx) >= min_events)
b41e9852 8173 return 0;
4c6e277c 8174 if (!io_run_task_work())
b41e9852 8175 break;
b41e9852 8176 } while (1);
2b188cc1
JA
8177
8178 if (sig) {
9e75ad5d
AB
8179#ifdef CONFIG_COMPAT
8180 if (in_compat_syscall())
8181 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 8182 sigsz);
9e75ad5d
AB
8183 else
8184#endif
b772434b 8185 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 8186
2b188cc1
JA
8187 if (ret)
8188 return ret;
8189 }
8190
950e79dd
OL
8191 if (uts) {
8192 struct timespec64 ts;
8193
8194 if (get_timespec64(&ts, uts))
8195 return -EFAULT;
8196 timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
8197 }
8198
90291099
PB
8199 init_waitqueue_func_entry(&iowq.wq, io_wake_function);
8200 iowq.wq.private = current;
8201 INIT_LIST_HEAD(&iowq.wq.entry);
8202 iowq.ctx = ctx;
bda52162 8203 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
5fd46178 8204 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
90291099 8205
c826bd7a 8206 trace_io_uring_cqring_wait(ctx, min_events);
bda52162 8207 do {
ca0a2651 8208 /* if we can't even flush overflow, don't wait for more */
90f67366 8209 if (!io_cqring_overflow_flush(ctx)) {
ca0a2651
JA
8210 ret = -EBUSY;
8211 break;
8212 }
311997b3 8213 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
bda52162 8214 TASK_INTERRUPTIBLE);
22833966 8215 ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
ca0a2651 8216 cond_resched();
eeb60b9a 8217 } while (ret > 0);
bda52162 8218
b4f20bb4 8219 finish_wait(&ctx->cq_wait, &iowq.wq);
b7db41c9 8220 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 8221
75b28aff 8222 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
8223}
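
/*
 * Illustrative userspace sketch (not part of this file): the application
 * counterpart of io_cqring_wait() above, blocking for one completion with a
 * timeout.  Assumes liburing's io_uring_wait_cqe_timeout() helper; -ETIME
 * means the wait expired before a CQE was posted.
 */
#include <liburing.h>

static int wait_one_cqe(struct io_uring *ring, unsigned int max_wait_ms)
{
	struct __kernel_timespec ts = {
		.tv_sec  = max_wait_ms / 1000,
		.tv_nsec = (max_wait_ms % 1000) * 1000000ULL,
	};
	struct io_uring_cqe *cqe;
	int ret = io_uring_wait_cqe_timeout(ring, &cqe, &ts);

	if (ret)
		return ret;		/* e.g. -ETIME or -EINTR */
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);	/* reap: advances the CQ head */
	return ret;
}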
8224
9123c8ff 8225static void io_free_page_table(void **table, size_t size)
05f3fb3c 8226{
9123c8ff 8227 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
05f3fb3c 8228
846a4ef2 8229 for (i = 0; i < nr_tables; i++)
9123c8ff
PB
8230 kfree(table[i]);
8231 kfree(table);
8232}
8233
c072481d 8234static __cold void **io_alloc_page_table(size_t size)
9123c8ff
PB
8235{
8236 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
8237 size_t init_size = size;
8238 void **table;
8239
0bea96f5 8240 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
9123c8ff
PB
8241 if (!table)
8242 return NULL;
8243
8244 for (i = 0; i < nr_tables; i++) {
27f6b318 8245 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
9123c8ff 8246
0bea96f5 8247 table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
9123c8ff
PB
8248 if (!table[i]) {
8249 io_free_page_table(table, init_size);
8250 return NULL;
8251 }
8252 size -= this_size;
8253 }
8254 return table;
05f3fb3c
JA
8255}
8256
28a9fe25 8257static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
1642b445 8258{
28a9fe25
PB
8259 percpu_ref_exit(&ref_node->refs);
8260 kfree(ref_node);
1642b445
PB
8261}
8262
c072481d 8263static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref)
b9bd2bea
PB
8264{
8265 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
8266 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
8267 unsigned long flags;
8268 bool first_add = false;
b36a2050 8269 unsigned long delay = HZ;
b9bd2bea
PB
8270
8271 spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
8272 node->done = true;
8273
b36a2050
DY
8274 /* if we are mid-quiesce then do not delay */
8275 if (node->rsrc_data->quiesce)
8276 delay = 0;
8277
b9bd2bea
PB
8278 while (!list_empty(&ctx->rsrc_ref_list)) {
8279 node = list_first_entry(&ctx->rsrc_ref_list,
8280 struct io_rsrc_node, node);
8281 /* recycle ref nodes in order */
8282 if (!node->done)
8283 break;
8284 list_del(&node->node);
8285 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
8286 }
8287 spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
8288
8289 if (first_add)
b36a2050 8290 mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
b9bd2bea
PB
8291}
8292
f6133fbd 8293static struct io_rsrc_node *io_rsrc_node_alloc(void)
b9bd2bea
PB
8294{
8295 struct io_rsrc_node *ref_node;
8296
8297 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
8298 if (!ref_node)
8299 return NULL;
8300
8301 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
8302 0, GFP_KERNEL)) {
8303 kfree(ref_node);
8304 return NULL;
8305 }
8306 INIT_LIST_HEAD(&ref_node->node);
8307 INIT_LIST_HEAD(&ref_node->rsrc_list);
8308 ref_node->done = false;
8309 return ref_node;
8310}
8311
a7f0ed5a
PB
8312static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
8313 struct io_rsrc_data *data_to_kill)
ab409402 8314 __must_hold(&ctx->uring_lock)
6b06314c 8315{
a7f0ed5a
PB
8316 WARN_ON_ONCE(!ctx->rsrc_backup_node);
8317 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
6b06314c 8318
ab409402
PB
8319 io_rsrc_refs_drop(ctx);
8320
a7f0ed5a
PB
8321 if (data_to_kill) {
8322 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
82fbcfa9 8323
a7f0ed5a 8324 rsrc_node->rsrc_data = data_to_kill;
4956b9ea 8325 spin_lock_irq(&ctx->rsrc_ref_lock);
a7f0ed5a 8326 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
4956b9ea 8327 spin_unlock_irq(&ctx->rsrc_ref_lock);
82fbcfa9 8328
3e942498 8329 atomic_inc(&data_to_kill->refs);
a7f0ed5a
PB
8330 percpu_ref_kill(&rsrc_node->refs);
8331 ctx->rsrc_node = NULL;
8332 }
6b06314c 8333
a7f0ed5a
PB
8334 if (!ctx->rsrc_node) {
8335 ctx->rsrc_node = ctx->rsrc_backup_node;
8336 ctx->rsrc_backup_node = NULL;
8337 }
8bad28d8
HX
8338}
8339
a7f0ed5a 8340static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
8dd03afe
PB
8341{
8342 if (ctx->rsrc_backup_node)
8343 return 0;
f6133fbd 8344 ctx->rsrc_backup_node = io_rsrc_node_alloc();
8dd03afe 8345 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
8bad28d8
HX
8346}
8347
c072481d
PB
8348static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
8349 struct io_ring_ctx *ctx)
8bad28d8
HX
8350{
8351 int ret;
05589553 8352
215c3902 8353 /* As we may drop ->uring_lock, other task may have started quiesce */
8bad28d8
HX
8354 if (data->quiesce)
8355 return -ENXIO;
05589553 8356
8bad28d8 8357 data->quiesce = true;
1ffc5422 8358 do {
a7f0ed5a 8359 ret = io_rsrc_node_switch_start(ctx);
8dd03afe 8360 if (ret)
f2303b1f 8361 break;
a7f0ed5a 8362 io_rsrc_node_switch(ctx, data);
f2303b1f 8363
3e942498
PB
8364 /* kill initial ref, already quiesced if zero */
8365 if (atomic_dec_and_test(&data->refs))
8366 break;
c018db4a 8367 mutex_unlock(&ctx->uring_lock);
8bad28d8 8368 flush_delayed_work(&ctx->rsrc_put_work);
1ffc5422 8369 ret = wait_for_completion_interruptible(&data->done);
c018db4a
JA
8370 if (!ret) {
8371 mutex_lock(&ctx->uring_lock);
80912cef
DY
8372 if (atomic_read(&data->refs) > 0) {
8373 /*
8374 * it has been revived by another thread while
8375 * we were unlocked
8376 */
8377 mutex_unlock(&ctx->uring_lock);
8378 } else {
8379 break;
8380 }
c018db4a 8381 }
8bad28d8 8382
3e942498
PB
8383 atomic_inc(&data->refs);
8384 /* wait for all works potentially completing data->done */
8385 flush_delayed_work(&ctx->rsrc_put_work);
cb5e1b81 8386 reinit_completion(&data->done);
8dd03afe 8387
1ffc5422 8388 ret = io_run_task_work_sig();
8bad28d8 8389 mutex_lock(&ctx->uring_lock);
f2303b1f 8390 } while (ret >= 0);
8bad28d8 8391 data->quiesce = false;
05f3fb3c 8392
8bad28d8 8393 return ret;
d7954b2b
BM
8394}
8395
2d091d62
PB
8396static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
8397{
8398 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
8399 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
8400
8401 return &data->tags[table_idx][off];
8402}
8403
44b31f2f 8404static void io_rsrc_data_free(struct io_rsrc_data *data)
1ad555c6 8405{
2d091d62
PB
8406 size_t size = data->nr * sizeof(data->tags[0][0]);
8407
8408 if (data->tags)
8409 io_free_page_table((void **)data->tags, size);
44b31f2f
PB
8410 kfree(data);
8411}
8412
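/*
 * Allocate an io_rsrc_data for @nr resources: the struct itself, a page-table
 * backed tag array, and, if @utags is non-NULL, the user-supplied tags copied
 * into it. On success *pdata is set and the data starts with one reference.
 */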
c072481d
PB
8413static __cold int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
8414 u64 __user *utags, unsigned nr,
8415 struct io_rsrc_data **pdata)
1ad555c6 8416{
b895c9a6 8417 struct io_rsrc_data *data;
2d091d62 8418 int ret = -ENOMEM;
d878c816 8419 unsigned i;
1ad555c6
BM
8420
8421 data = kzalloc(sizeof(*data), GFP_KERNEL);
8422 if (!data)
d878c816 8423 return -ENOMEM;
2d091d62 8424 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
b60c8dce 8425 if (!data->tags) {
1ad555c6 8426 kfree(data);
d878c816
PB
8427 return -ENOMEM;
8428 }
2d091d62
PB
8429
8430 data->nr = nr;
8431 data->ctx = ctx;
8432 data->do_put = do_put;
d878c816 8433 if (utags) {
2d091d62 8434 ret = -EFAULT;
d878c816 8435 for (i = 0; i < nr; i++) {
fdd1dc31
CIK
8436 u64 *tag_slot = io_get_tag_slot(data, i);
8437
8438 if (copy_from_user(tag_slot, &utags[i],
8439 sizeof(*tag_slot)))
2d091d62 8440 goto fail;
d878c816 8441 }
1ad555c6 8442 }
b60c8dce 8443
3e942498 8444 atomic_set(&data->refs, 1);
1ad555c6 8445 init_completion(&data->done);
d878c816
PB
8446 *pdata = data;
8447 return 0;
2d091d62
PB
8448fail:
8449 io_rsrc_data_free(data);
8450 return ret;
1ad555c6
BM
8451}
8452
9123c8ff
PB
8453static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
8454{
0bea96f5
PB
8455 table->files = kvcalloc(nr_files, sizeof(table->files[0]),
8456 GFP_KERNEL_ACCOUNT);
9123c8ff
PB
8457 return !!table->files;
8458}
8459
042b0d85 8460static void io_free_file_tables(struct io_file_table *table)
9123c8ff 8461{
042b0d85 8462 kvfree(table->files);
9123c8ff
PB
8463 table->files = NULL;
8464}
8465
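/*
 * Final teardown of the fixed file table: drop the files that were not handed
 * to the SCM_RIGHTS machinery, free any SCM_RIGHTS skbs still queued on the
 * ring socket, then release the file table and the backing rsrc data.
 */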
fff4db76 8466static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
1ad555c6 8467{
1f59bc0f
PB
8468 int i;
8469
8470 for (i = 0; i < ctx->nr_user_files; i++) {
8471 struct file *file = io_file_from_index(ctx, i);
8472
8473 if (!file || io_file_need_scm(file))
8474 continue;
8475 io_fixed_file_slot(&ctx->file_table, i)->file_ptr = 0;
8476 fput(file);
8477 }
8478
fff4db76
PB
8479#if defined(CONFIG_UNIX)
8480 if (ctx->ring_sock) {
8481 struct sock *sock = ctx->ring_sock->sk;
8482 struct sk_buff *skb;
8483
8484 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
8485 kfree_skb(skb);
8486 }
fff4db76 8487#endif
042b0d85 8488 io_free_file_tables(&ctx->file_table);
44b31f2f 8489 io_rsrc_data_free(ctx->file_data);
fff4db76
PB
8490 ctx->file_data = NULL;
8491 ctx->nr_user_files = 0;
1ad555c6
BM
8492}
8493
d7954b2b
BM
8494static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
8495{
d7954b2b
BM
8496 int ret;
8497
08480400 8498 if (!ctx->file_data)
d7954b2b 8499 return -ENXIO;
08480400
PB
8500 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
8501 if (!ret)
8502 __io_sqe_files_unregister(ctx);
8503 return ret;
6b06314c
JA
8504}
8505
37d1e2e3 8506static void io_sq_thread_unpark(struct io_sq_data *sqd)
09a6f4ef 8507 __releases(&sqd->lock)
37d1e2e3 8508{
521d6a73
PB
8509 WARN_ON_ONCE(sqd->thread == current);
8510
9e138a48
PB
8511 /*
8512 * Do the dance but not conditional clear_bit() because it'd race with
8513 * other threads incrementing park_pending and setting the bit.
8514 */
37d1e2e3 8515 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
9e138a48
PB
8516 if (atomic_dec_return(&sqd->park_pending))
8517 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
09a6f4ef 8518 mutex_unlock(&sqd->lock);
37d1e2e3
JA
8519}
8520
86e0d676 8521static void io_sq_thread_park(struct io_sq_data *sqd)
09a6f4ef 8522 __acquires(&sqd->lock)
37d1e2e3 8523{
521d6a73
PB
8524 WARN_ON_ONCE(sqd->thread == current);
8525
9e138a48 8526 atomic_inc(&sqd->park_pending);
86e0d676 8527 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
09a6f4ef 8528 mutex_lock(&sqd->lock);
05962f95 8529 if (sqd->thread)
86e0d676 8530 wake_up_process(sqd->thread);
37d1e2e3
JA
8531}
8532
8533static void io_sq_thread_stop(struct io_sq_data *sqd)
8534{
521d6a73 8535 WARN_ON_ONCE(sqd->thread == current);
88885f66 8536 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
521d6a73 8537
05962f95 8538 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
88885f66 8539 mutex_lock(&sqd->lock);
e8f98f24
JA
8540 if (sqd->thread)
8541 wake_up_process(sqd->thread);
09a6f4ef 8542 mutex_unlock(&sqd->lock);
05962f95 8543 wait_for_completion(&sqd->exited);
37d1e2e3
JA
8544}
8545
534ca6d6 8546static void io_put_sq_data(struct io_sq_data *sqd)
6c271ce2 8547{
534ca6d6 8548 if (refcount_dec_and_test(&sqd->refs)) {
9e138a48
PB
8549 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
8550
37d1e2e3
JA
8551 io_sq_thread_stop(sqd);
8552 kfree(sqd);
8553 }
8554}
8555
8556static void io_sq_thread_finish(struct io_ring_ctx *ctx)
8557{
8558 struct io_sq_data *sqd = ctx->sq_data;
8559
8560 if (sqd) {
05962f95 8561 io_sq_thread_park(sqd);
521d6a73 8562 list_del_init(&ctx->sqd_list);
37d1e2e3 8563 io_sqd_update_thread_idle(sqd);
05962f95 8564 io_sq_thread_unpark(sqd);
37d1e2e3
JA
8565
8566 io_put_sq_data(sqd);
8567 ctx->sq_data = NULL;
534ca6d6
JA
8568 }
8569}
8570
aa06165d
JA
8571static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
8572{
8573 struct io_ring_ctx *ctx_attach;
8574 struct io_sq_data *sqd;
8575 struct fd f;
8576
8577 f = fdget(p->wq_fd);
8578 if (!f.file)
8579 return ERR_PTR(-ENXIO);
8580 if (f.file->f_op != &io_uring_fops) {
8581 fdput(f);
8582 return ERR_PTR(-EINVAL);
8583 }
8584
8585 ctx_attach = f.file->private_data;
8586 sqd = ctx_attach->sq_data;
8587 if (!sqd) {
8588 fdput(f);
8589 return ERR_PTR(-EINVAL);
8590 }
5c2469e0
JA
8591 if (sqd->task_tgid != current->tgid) {
8592 fdput(f);
8593 return ERR_PTR(-EPERM);
8594 }
aa06165d
JA
8595
8596 refcount_inc(&sqd->refs);
8597 fdput(f);
8598 return sqd;
8599}
8600
26984fbf
PB
8601static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
8602 bool *attached)
534ca6d6
JA
8603{
8604 struct io_sq_data *sqd;
8605
26984fbf 8606 *attached = false;
5c2469e0
JA
8607 if (p->flags & IORING_SETUP_ATTACH_WQ) {
8608 sqd = io_attach_sq_data(p);
26984fbf
PB
8609 if (!IS_ERR(sqd)) {
8610 *attached = true;
5c2469e0 8611 return sqd;
26984fbf 8612 }
5c2469e0
JA
8613 /* fall through for EPERM case, setup new sqd/task */
8614 if (PTR_ERR(sqd) != -EPERM)
8615 return sqd;
8616 }
aa06165d 8617
534ca6d6
JA
8618 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
8619 if (!sqd)
8620 return ERR_PTR(-ENOMEM);
8621
9e138a48 8622 atomic_set(&sqd->park_pending, 0);
534ca6d6 8623 refcount_set(&sqd->refs, 1);
69fb2131 8624 INIT_LIST_HEAD(&sqd->ctx_list);
09a6f4ef 8625 mutex_init(&sqd->lock);
534ca6d6 8626 init_waitqueue_head(&sqd->wait);
37d1e2e3 8627 init_completion(&sqd->exited);
534ca6d6
JA
8628 return sqd;
8629}
8630
6b06314c
JA
8631/*
8632 * Ensure the UNIX gc is aware of our file set, so we are certain that
8633 * the io_uring can be safely unregistered on process exit, even if we have
1f59bc0f
PB
8634 * loops in the file referencing. We account only files that can hold other
8635 * files because otherwise they can't form a loop and so are not interesting
8636 * for GC.
6b06314c 8637 */
8b3171bd 8638static int io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
6b06314c 8639{
73b25d3b 8640#if defined(CONFIG_UNIX)
6b06314c 8641 struct sock *sk = ctx->ring_sock->sk;
73b25d3b 8642 struct sk_buff_head *head = &sk->sk_receive_queue;
6b06314c
JA
8643 struct scm_fp_list *fpl;
8644 struct sk_buff *skb;
6b06314c 8645
73b25d3b
PB
8646 if (likely(!io_file_need_scm(file)))
8647 return 0;
8648
8649 /*
8650 * See if we can merge this file into an existing skb SCM_RIGHTS
8651 * file set. If there's no room, fall back to allocating a new skb
8652 * and filling it in.
8653 */
8654 spin_lock_irq(&head->lock);
8655 skb = skb_peek(head);
8656 if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
8657 __skb_unlink(skb, head);
8658 else
8659 skb = NULL;
8660 spin_unlock_irq(&head->lock);
6b06314c 8661
6b06314c 8662 if (!skb) {
73b25d3b
PB
8663 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
8664 if (!fpl)
8665 return -ENOMEM;
6b06314c 8666
73b25d3b
PB
8667 skb = alloc_skb(0, GFP_KERNEL);
8668 if (!skb) {
8669 kfree(fpl);
8670 return -ENOMEM;
8671 }
6b06314c 8672
73b25d3b
PB
8673 fpl->user = get_uid(current_user());
8674 fpl->max = SCM_MAX_FD;
8675 fpl->count = 0;
dca58c6a 8676
73b25d3b
PB
8677 UNIXCB(skb).fp = fpl;
8678 skb->sk = sk;
8679 skb->destructor = unix_destruct_scm;
8680 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
8681 }
8682
8683 fpl = UNIXCB(skb).fp;
8684 fpl->fp[fpl->count++] = get_file(file);
8685 unix_inflight(fpl->user, file);
8686 skb_queue_head(head, skb);
dca58c6a 8687 fput(file);
73b25d3b 8688#endif
6b06314c
JA
8689 return 0;
8690}
6b06314c 8691
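/*
 * Drop a registered file that has been queued for removal. If the file was
 * accounted to the UNIX GC, locate the SCM_RIGHTS skb that carries it, take
 * it out of flight and shrink or free that skb; otherwise just fput() it.
 */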
47e90392 8692static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
05f3fb3c 8693{
50238531 8694 struct file *file = prsrc->file;
05f3fb3c
JA
8695#if defined(CONFIG_UNIX)
8696 struct sock *sock = ctx->ring_sock->sk;
8697 struct sk_buff_head list, *head = &sock->sk_receive_queue;
8698 struct sk_buff *skb;
8699 int i;
8700
1f59bc0f
PB
8701 if (!io_file_need_scm(file)) {
8702 fput(file);
8703 return;
8704 }
8705
05f3fb3c
JA
8706 __skb_queue_head_init(&list);
8707
8708 /*
8709 * Find the skb that holds this file in its SCM_RIGHTS. When found,
8710 * remove this entry and rearrange the file array.
8711 */
8712 skb = skb_dequeue(head);
8713 while (skb) {
8714 struct scm_fp_list *fp;
8715
8716 fp = UNIXCB(skb).fp;
8717 for (i = 0; i < fp->count; i++) {
8718 int left;
8719
8720 if (fp->fp[i] != file)
8721 continue;
8722
8723 unix_notinflight(fp->user, fp->fp[i]);
8724 left = fp->count - 1 - i;
8725 if (left) {
8726 memmove(&fp->fp[i], &fp->fp[i + 1],
8727 left * sizeof(struct file *));
8728 }
8729 fp->count--;
8730 if (!fp->count) {
8731 kfree_skb(skb);
8732 skb = NULL;
8733 } else {
8734 __skb_queue_tail(&list, skb);
8735 }
8736 fput(file);
8737 file = NULL;
8738 break;
8739 }
8740
8741 if (!file)
8742 break;
8743
8744 __skb_queue_tail(&list, skb);
8745
8746 skb = skb_dequeue(head);
8747 }
8748
8749 if (skb_peek(&list)) {
8750 spin_lock_irq(&head->lock);
8751 while ((skb = __skb_dequeue(&list)) != NULL)
8752 __skb_queue_tail(head, skb);
8753 spin_unlock_irq(&head->lock);
8754 }
8755#else
8756 fput(file);
8757#endif
8758}
8759
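/*
 * Process the removal list of a retired resource node: post a CQE for every
 * tagged resource, hand each one to the type-specific ->do_put() callback,
 * destroy the node, and complete ->done once the owning data's refcount hits
 * zero. Invoked from io_rsrc_put_work(), which drains ->rsrc_put_llist.
 */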
b895c9a6 8760static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
65e19f54 8761{
b895c9a6 8762 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
269bbe5f
BM
8763 struct io_ring_ctx *ctx = rsrc_data->ctx;
8764 struct io_rsrc_put *prsrc, *tmp;
05589553 8765
269bbe5f
BM
8766 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
8767 list_del(&prsrc->list);
b60c8dce
PB
8768
8769 if (prsrc->tag) {
f8929630
PB
8770 if (ctx->flags & IORING_SETUP_IOPOLL)
8771 mutex_lock(&ctx->uring_lock);
b60c8dce 8772
79ebeaee 8773 spin_lock(&ctx->completion_lock);
913a571a 8774 io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
b60c8dce 8775 io_commit_cqring(ctx);
79ebeaee 8776 spin_unlock(&ctx->completion_lock);
b60c8dce 8777 io_cqring_ev_posted(ctx);
f8929630
PB
8778
8779 if (ctx->flags & IORING_SETUP_IOPOLL)
8780 mutex_unlock(&ctx->uring_lock);
b60c8dce
PB
8781 }
8782
40ae0ff7 8783 rsrc_data->do_put(ctx, prsrc);
269bbe5f 8784 kfree(prsrc);
65e19f54 8785 }
05589553 8786
28a9fe25 8787 io_rsrc_node_destroy(ref_node);
3e942498
PB
8788 if (atomic_dec_and_test(&rsrc_data->refs))
8789 complete(&rsrc_data->done);
2faf852d 8790}
65e19f54 8791
269bbe5f 8792static void io_rsrc_put_work(struct work_struct *work)
4a38aed2
JA
8793{
8794 struct io_ring_ctx *ctx;
8795 struct llist_node *node;
8796
269bbe5f
BM
8797 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
8798 node = llist_del_all(&ctx->rsrc_put_llist);
4a38aed2
JA
8799
8800 while (node) {
b895c9a6 8801 struct io_rsrc_node *ref_node;
4a38aed2
JA
8802 struct llist_node *next = node->next;
8803
b895c9a6 8804 ref_node = llist_entry(node, struct io_rsrc_node, llist);
269bbe5f 8805 __io_rsrc_put_work(ref_node);
4a38aed2
JA
8806 node = next;
8807 }
8808}
8809
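/*
 * Register a fixed file set (IORING_REGISTER_FILES). Allocates the rsrc data
 * and file table, grabs a reference to each descriptor (sparse -1 entries are
 * allowed as long as they carry no tag), accounts files for UNIX GC purposes,
 * and finally switches in a fresh rsrc node. Any failure unwinds via
 * __io_sqe_files_unregister().
 */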
6b06314c 8810static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
792e3582 8811 unsigned nr_args, u64 __user *tags)
6b06314c
JA
8812{
8813 __s32 __user *fds = (__s32 __user *) arg;
05f3fb3c 8814 struct file *file;
f3baed39 8815 int fd, ret;
846a4ef2 8816 unsigned i;
6b06314c 8817
05f3fb3c 8818 if (ctx->file_data)
6b06314c
JA
8819 return -EBUSY;
8820 if (!nr_args)
8821 return -EINVAL;
8822 if (nr_args > IORING_MAX_FIXED_FILES)
8823 return -EMFILE;
3a1b8a4e
PB
8824 if (nr_args > rlimit(RLIMIT_NOFILE))
8825 return -EMFILE;
a7f0ed5a 8826 ret = io_rsrc_node_switch_start(ctx);
f3baed39
PB
8827 if (ret)
8828 return ret;
d878c816
PB
8829 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
8830 &ctx->file_data);
8831 if (ret)
8832 return ret;
6b06314c 8833
a03a2a20
PB
8834 if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
8835 io_rsrc_data_free(ctx->file_data);
8836 ctx->file_data = NULL;
8837 return -ENOMEM;
8838 }
65e19f54 8839
08a45173 8840 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
a03a2a20
PB
8841 struct io_fixed_file *file_slot;
8842
d878c816 8843 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
600cf3f8 8844 ret = -EFAULT;
a03a2a20 8845 goto fail;
600cf3f8 8846 }
08a45173 8847 /* allow sparse sets */
792e3582
PB
8848 if (fd == -1) {
8849 ret = -EINVAL;
2d091d62 8850 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
a03a2a20 8851 goto fail;
08a45173 8852 continue;
792e3582 8853 }
6b06314c 8854
05f3fb3c 8855 file = fget(fd);
6b06314c 8856 ret = -EBADF;
792e3582 8857 if (unlikely(!file))
a03a2a20 8858 goto fail;
05f3fb3c 8859
6b06314c
JA
8860 /*
8861 * Don't allow io_uring instances to be registered. If UNIX
8862 * isn't enabled, then this causes a reference cycle and this
8863 * instance can never get freed. If UNIX is enabled we'll
8864 * handle it just fine, but there's still no point in allowing
8865 * a ring fd as it doesn't support regular read/write anyway.
8866 */
05f3fb3c
JA
8867 if (file->f_op == &io_uring_fops) {
8868 fput(file);
a03a2a20
PB
8869 goto fail;
8870 }
8b3171bd 8871 ret = io_scm_file_account(ctx, file);
a03a2a20 8872 if (ret) {
a03a2a20
PB
8873 fput(file);
8874 goto fail;
6b06314c 8875 }
e390510a
PB
8876 file_slot = io_fixed_file_slot(&ctx->file_table, i);
8877 io_fixed_file_set(file_slot, file);
05589553 8878 }
6b06314c 8879
a7f0ed5a 8880 io_rsrc_node_switch(ctx, NULL);
a03a2a20
PB
8881 return 0;
8882fail:
8883 __io_sqe_files_unregister(ctx);
6b06314c
JA
8884 return ret;
8885}
8886
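/*
 * Queue @rsrc (with the tag stored for @idx) on @node's removal list; it will
 * be released by __io_rsrc_put_work() once the node's references drop.
 */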
9c7b0ba8
PB
8887static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
8888 struct io_rsrc_node *node, void *rsrc)
8889{
8f0a2480 8890 u64 *tag_slot = io_get_tag_slot(data, idx);
9c7b0ba8
PB
8891 struct io_rsrc_put *prsrc;
8892
8893 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
8894 if (!prsrc)
8895 return -ENOMEM;
8896
8f0a2480
PB
8897 prsrc->tag = *tag_slot;
8898 *tag_slot = 0;
9c7b0ba8
PB
8899 prsrc->rsrc = rsrc;
8900 list_add(&prsrc->list, &node->rsrc_list);
8901 return 0;
8902}
8903
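/*
 * Install @file into fixed file table slot @slot_index at request issue time.
 * If the slot is already occupied, the old file is queued for removal and the
 * rsrc node is switched before returning. On failure the new file is fput().
 */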
b9445598
PB
8904static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
8905 unsigned int issue_flags, u32 slot_index)
8906{
8907 struct io_ring_ctx *ctx = req->ctx;
9c7b0ba8 8908 bool needs_switch = false;
b9445598
PB
8909 struct io_fixed_file *file_slot;
8910 int ret = -EBADF;
8911
f8929630 8912 io_ring_submit_lock(ctx, issue_flags);
b9445598
PB
8913 if (file->f_op == &io_uring_fops)
8914 goto err;
8915 ret = -ENXIO;
8916 if (!ctx->file_data)
8917 goto err;
8918 ret = -EINVAL;
8919 if (slot_index >= ctx->nr_user_files)
8920 goto err;
8921
8922 slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
8923 file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
9c7b0ba8
PB
8924
8925 if (file_slot->file_ptr) {
8926 struct file *old_file;
8927
8928 ret = io_rsrc_node_switch_start(ctx);
8929 if (ret)
8930 goto err;
8931
8932 old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
8933 ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
8934 ctx->rsrc_node, old_file);
8935 if (ret)
8936 goto err;
8937 file_slot->file_ptr = 0;
8938 needs_switch = true;
8939 }
b9445598 8940
8b3171bd 8941 ret = io_scm_file_account(ctx, file);
e390510a
PB
8942 if (!ret) {
8943 *io_get_tag_slot(ctx->file_data, slot_index) = 0;
8944 io_fixed_file_set(file_slot, file);
b9445598 8945 }
b9445598 8946err:
9c7b0ba8
PB
8947 if (needs_switch)
8948 io_rsrc_node_switch(ctx, ctx->file_data);
f8929630 8949 io_ring_submit_unlock(ctx, issue_flags);
b9445598
PB
8950 if (ret)
8951 fput(file);
8952 return ret;
8953}
8954
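/*
 * Close a fixed-file slot (close with ->file_slot set): remove the file from
 * the table, queue it for removal on the current rsrc node and switch nodes
 * so the removal gets processed.
 */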
7df778be
PB
8955static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
8956{
8957 unsigned int offset = req->close.file_slot - 1;
8958 struct io_ring_ctx *ctx = req->ctx;
8959 struct io_fixed_file *file_slot;
8960 struct file *file;
4cdd158b 8961 int ret;
7df778be 8962
f8929630 8963 io_ring_submit_lock(ctx, issue_flags);
7df778be
PB
8964 ret = -ENXIO;
8965 if (unlikely(!ctx->file_data))
8966 goto out;
8967 ret = -EINVAL;
8968 if (offset >= ctx->nr_user_files)
8969 goto out;
8970 ret = io_rsrc_node_switch_start(ctx);
8971 if (ret)
8972 goto out;
8973
4cdd158b
PB
8974 offset = array_index_nospec(offset, ctx->nr_user_files);
8975 file_slot = io_fixed_file_slot(&ctx->file_table, offset);
7df778be
PB
8976 ret = -EBADF;
8977 if (!file_slot->file_ptr)
8978 goto out;
8979
8980 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
8981 ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
8982 if (ret)
8983 goto out;
8984
8985 file_slot->file_ptr = 0;
8986 io_rsrc_node_switch(ctx, ctx->file_data);
8987 ret = 0;
8988out:
f8929630 8989 io_ring_submit_unlock(ctx, issue_flags);
7df778be
PB
8990 return ret;
8991}
8992
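/*
 * Apply a fixed-file update: for each entry, queue the old file (if any) for
 * removal and install the new one along with its tag, honouring
 * IORING_REGISTER_FILES_SKIP. Returns the number of entries processed, or an
 * error if nothing was done.
 */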
05f3fb3c 8993static int __io_sqe_files_update(struct io_ring_ctx *ctx,
c3bdad02 8994 struct io_uring_rsrc_update2 *up,
05f3fb3c
JA
8995 unsigned nr_args)
8996{
c3bdad02 8997 u64 __user *tags = u64_to_user_ptr(up->tags);
98f0b3b4 8998 __s32 __user *fds = u64_to_user_ptr(up->data);
b895c9a6 8999 struct io_rsrc_data *data = ctx->file_data;
a04b0ac0
PB
9000 struct io_fixed_file *file_slot;
9001 struct file *file;
98f0b3b4
PB
9002 int fd, i, err = 0;
9003 unsigned int done;
05589553 9004 bool needs_switch = false;
c3a31e60 9005
98f0b3b4
PB
9006 if (!ctx->file_data)
9007 return -ENXIO;
9008 if (up->offset + nr_args > ctx->nr_user_files)
c3a31e60
JA
9009 return -EINVAL;
9010
67973b93 9011 for (done = 0; done < nr_args; done++) {
c3bdad02
PB
9012 u64 tag = 0;
9013
9014 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
9015 copy_from_user(&fd, &fds[done], sizeof(fd))) {
c3a31e60
JA
9016 err = -EFAULT;
9017 break;
9018 }
c3bdad02
PB
9019 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
9020 err = -EINVAL;
9021 break;
9022 }
4e0377a1 9023 if (fd == IORING_REGISTER_FILES_SKIP)
9024 continue;
9025
67973b93 9026 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
aeca241b 9027 file_slot = io_fixed_file_slot(&ctx->file_table, i);
ea64ec02 9028
a04b0ac0
PB
9029 if (file_slot->file_ptr) {
9030 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
4cdd158b 9031 err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
a5318d3c
HD
9032 if (err)
9033 break;
a04b0ac0 9034 file_slot->file_ptr = 0;
05589553 9035 needs_switch = true;
c3a31e60
JA
9036 }
9037 if (fd != -1) {
c3a31e60
JA
9038 file = fget(fd);
9039 if (!file) {
9040 err = -EBADF;
9041 break;
9042 }
9043 /*
9044 * Don't allow io_uring instances to be registered. If
9045 * UNIX isn't enabled, then this causes a reference
9046 * cycle and this instance can never get freed. If UNIX
9047 * is enabled we'll handle it just fine, but there's
9048 * still no point in allowing a ring fd as it doesn't
9049 * support regular read/write anyway.
9050 */
9051 if (file->f_op == &io_uring_fops) {
9052 fput(file);
9053 err = -EBADF;
9054 break;
9055 }
8b3171bd 9056 err = io_scm_file_account(ctx, file);
f3bd9dae
YY
9057 if (err) {
9058 fput(file);
c3a31e60 9059 break;
f3bd9dae 9060 }
e390510a
PB
9061 *io_get_tag_slot(data, i) = tag;
9062 io_fixed_file_set(file_slot, file);
c3a31e60 9063 }
05f3fb3c
JA
9064 }
9065
a7f0ed5a
PB
9066 if (needs_switch)
9067 io_rsrc_node_switch(ctx, data);
c3a31e60
JA
9068 return done ? done : err;
9069}
05589553 9070
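/*
 * Create the io-wq instance used for async offload of this ring's requests,
 * sharing the ctx-wide hash map (allocated on first use) and capping worker
 * concurrency at min(sq_entries, 4 * number of online CPUs).
 */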
685fe7fe
JA
9071static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
9072 struct task_struct *task)
24369c2e 9073{
e941894e 9074 struct io_wq_hash *hash;
24369c2e 9075 struct io_wq_data data;
24369c2e 9076 unsigned int concurrency;
24369c2e 9077
362a9e65 9078 mutex_lock(&ctx->uring_lock);
e941894e
JA
9079 hash = ctx->hash_map;
9080 if (!hash) {
9081 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
362a9e65
YY
9082 if (!hash) {
9083 mutex_unlock(&ctx->uring_lock);
e941894e 9084 return ERR_PTR(-ENOMEM);
362a9e65 9085 }
e941894e
JA
9086 refcount_set(&hash->refs, 1);
9087 init_waitqueue_head(&hash->wait);
9088 ctx->hash_map = hash;
24369c2e 9089 }
362a9e65 9090 mutex_unlock(&ctx->uring_lock);
24369c2e 9091
e941894e 9092 data.hash = hash;
685fe7fe 9093 data.task = task;
ebc11b6c 9094 data.free_work = io_wq_free_work;
f5fa38c5 9095 data.do_work = io_wq_submit_work;
24369c2e 9096
d25e3a3d
JA
9097 /* Do QD, or 4 * CPUS, whatever is smallest */
9098 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
24369c2e 9099
5aa75ed5 9100 return io_wq_create(concurrency, &data);
24369c2e
PB
9101}
9102
c072481d
PB
9103static __cold int io_uring_alloc_task_context(struct task_struct *task,
9104 struct io_ring_ctx *ctx)
0f212204
JA
9105{
9106 struct io_uring_task *tctx;
d8a6df10 9107 int ret;
0f212204 9108
09899b19 9109 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
0f212204
JA
9110 if (unlikely(!tctx))
9111 return -ENOMEM;
9112
e7a6c00d
JA
9113 tctx->registered_rings = kcalloc(IO_RINGFD_REG_MAX,
9114 sizeof(struct file *), GFP_KERNEL);
9115 if (unlikely(!tctx->registered_rings)) {
9116 kfree(tctx);
9117 return -ENOMEM;
9118 }
9119
d8a6df10
JA
9120 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
9121 if (unlikely(ret)) {
e7a6c00d 9122 kfree(tctx->registered_rings);
d8a6df10
JA
9123 kfree(tctx);
9124 return ret;
9125 }
9126
685fe7fe 9127 tctx->io_wq = io_init_wq_offload(ctx, task);
5aa75ed5
JA
9128 if (IS_ERR(tctx->io_wq)) {
9129 ret = PTR_ERR(tctx->io_wq);
9130 percpu_counter_destroy(&tctx->inflight);
e7a6c00d 9131 kfree(tctx->registered_rings);
5aa75ed5
JA
9132 kfree(tctx);
9133 return ret;
9134 }
9135
0f212204
JA
9136 xa_init(&tctx->xa);
9137 init_waitqueue_head(&tctx->wait);
fdaf083c 9138 atomic_set(&tctx->in_idle, 0);
0f212204 9139 task->io_uring = tctx;
7cbf1722
JA
9140 spin_lock_init(&tctx->task_lock);
9141 INIT_WQ_LIST(&tctx->task_list);
4813c377 9142 INIT_WQ_LIST(&tctx->prior_task_list);
7cbf1722 9143 init_task_work(&tctx->task_work, tctx_task_work);
0f212204
JA
9144 return 0;
9145}
9146
9147void __io_uring_free(struct task_struct *tsk)
9148{
9149 struct io_uring_task *tctx = tsk->io_uring;
9150
9151 WARN_ON_ONCE(!xa_empty(&tctx->xa));
ef8eaa4e 9152 WARN_ON_ONCE(tctx->io_wq);
09899b19 9153 WARN_ON_ONCE(tctx->cached_refs);
ef8eaa4e 9154
e7a6c00d 9155 kfree(tctx->registered_rings);
d8a6df10 9156 percpu_counter_destroy(&tctx->inflight);
0f212204
JA
9157 kfree(tctx);
9158 tsk->io_uring = NULL;
9159}
9160
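/*
 * Set up SQPOLL offload when requested: attach to an existing io_sq_data via
 * IORING_SETUP_ATTACH_WQ or allocate a new one, record the submitter's creds,
 * configure the idle timeout and optional CPU affinity, then create and wake
 * the SQ poll thread.
 */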
c072481d
PB
9161static __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
9162 struct io_uring_params *p)
2b188cc1
JA
9163{
9164 int ret;
9165
d25e3a3d
JA
9166 /* Retain compatibility with failing for an invalid attach attempt */
9167 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
9168 IORING_SETUP_ATTACH_WQ) {
9169 struct fd f;
9170
9171 f = fdget(p->wq_fd);
9172 if (!f.file)
9173 return -ENXIO;
0cc936f7
JA
9174 if (f.file->f_op != &io_uring_fops) {
9175 fdput(f);
f2a48dd0 9176 return -EINVAL;
0cc936f7
JA
9177 }
9178 fdput(f);
d25e3a3d 9179 }
6c271ce2 9180 if (ctx->flags & IORING_SETUP_SQPOLL) {
46fe18b1 9181 struct task_struct *tsk;
534ca6d6 9182 struct io_sq_data *sqd;
26984fbf 9183 bool attached;
534ca6d6 9184
cdc1404a
PM
9185 ret = security_uring_sqpoll();
9186 if (ret)
9187 return ret;
9188
26984fbf 9189 sqd = io_get_sq_data(p, &attached);
534ca6d6
JA
9190 if (IS_ERR(sqd)) {
9191 ret = PTR_ERR(sqd);
9192 goto err;
9193 }
69fb2131 9194
7c30f36a 9195 ctx->sq_creds = get_current_cred();
534ca6d6 9196 ctx->sq_data = sqd;
917257da
JA
9197 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
9198 if (!ctx->sq_thread_idle)
9199 ctx->sq_thread_idle = HZ;
9200
78d7f6ba 9201 io_sq_thread_park(sqd);
de75a3d3
PB
9202 list_add(&ctx->sqd_list, &sqd->ctx_list);
9203 io_sqd_update_thread_idle(sqd);
26984fbf 9204 /* don't attach to a dying SQPOLL thread, would be racy */
f2a48dd0 9205 ret = (attached && !sqd->thread) ? -ENXIO : 0;
78d7f6ba
PB
9206 io_sq_thread_unpark(sqd);
9207
de75a3d3
PB
9208 if (ret < 0)
9209 goto err;
9210 if (attached)
5aa75ed5 9211 return 0;
aa06165d 9212
6c271ce2 9213 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 9214 int cpu = p->sq_thread_cpu;
6c271ce2 9215
917257da 9216 ret = -EINVAL;
f2a48dd0 9217 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
e8f98f24 9218 goto err_sqpoll;
37d1e2e3 9219 sqd->sq_cpu = cpu;
6c271ce2 9220 } else {
37d1e2e3 9221 sqd->sq_cpu = -1;
6c271ce2 9222 }
37d1e2e3
JA
9223
9224 sqd->task_pid = current->pid;
5c2469e0 9225 sqd->task_tgid = current->tgid;
46fe18b1
JA
9226 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
9227 if (IS_ERR(tsk)) {
9228 ret = PTR_ERR(tsk);
e8f98f24 9229 goto err_sqpoll;
6c271ce2 9230 }
97a73a0f 9231
46fe18b1 9232 sqd->thread = tsk;
97a73a0f 9233 ret = io_uring_alloc_task_context(tsk, ctx);
46fe18b1 9234 wake_up_new_task(tsk);
0f212204
JA
9235 if (ret)
9236 goto err;
6c271ce2
JA
9237 } else if (p->flags & IORING_SETUP_SQ_AFF) {
9238 /* Can't have SQ_AFF without SQPOLL */
9239 ret = -EINVAL;
9240 goto err;
9241 }
9242
2b188cc1 9243 return 0;
f2a48dd0
PB
9244err_sqpoll:
9245 complete(&ctx->sq_data->exited);
2b188cc1 9246err:
37d1e2e3 9247 io_sq_thread_finish(ctx);
2b188cc1
JA
9248 return ret;
9249}
9250
a087e2b5
BM
9251static inline void __io_unaccount_mem(struct user_struct *user,
9252 unsigned long nr_pages)
2b188cc1
JA
9253{
9254 atomic_long_sub(nr_pages, &user->locked_vm);
9255}
9256
a087e2b5
BM
9257static inline int __io_account_mem(struct user_struct *user,
9258 unsigned long nr_pages)
2b188cc1
JA
9259{
9260 unsigned long page_limit, cur_pages, new_pages;
9261
9262 /* Don't allow more pages than we can safely lock */
9263 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
9264
9265 do {
9266 cur_pages = atomic_long_read(&user->locked_vm);
9267 new_pages = cur_pages + nr_pages;
9268 if (new_pages > page_limit)
9269 return -ENOMEM;
9270 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
9271 new_pages) != cur_pages);
9272
9273 return 0;
9274}
9275
26bfa89e 9276static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
a087e2b5 9277{
62e398be 9278 if (ctx->user)
a087e2b5 9279 __io_unaccount_mem(ctx->user, nr_pages);
30975825 9280
26bfa89e
JA
9281 if (ctx->mm_account)
9282 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
a087e2b5
BM
9283}
9284
26bfa89e 9285static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
a087e2b5 9286{
30975825
BM
9287 int ret;
9288
62e398be 9289 if (ctx->user) {
30975825
BM
9290 ret = __io_account_mem(ctx->user, nr_pages);
9291 if (ret)
9292 return ret;
9293 }
9294
26bfa89e
JA
9295 if (ctx->mm_account)
9296 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
a087e2b5
BM
9297
9298 return 0;
9299}
9300
2b188cc1
JA
9301static void io_mem_free(void *ptr)
9302{
52e04ef4
MR
9303 struct page *page;
9304
9305 if (!ptr)
9306 return;
2b188cc1 9307
52e04ef4 9308 page = virt_to_head_page(ptr);
2b188cc1
JA
9309 if (put_page_testzero(page))
9310 free_compound_page(page);
9311}
9312
9313static void *io_mem_alloc(size_t size)
9314{
0a3f1e0b 9315 gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
2b188cc1 9316
0a3f1e0b 9317 return (void *) __get_free_pages(gfp, get_order(size));
2b188cc1
JA
9318}
9319
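/*
 * Compute the allocation size for the rings: the CQE array followed by the
 * (cache-line aligned on SMP) SQ index array. The offset of the SQ array is
 * returned via *sq_offset; SIZE_MAX signals arithmetic overflow.
 */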
75b28aff
HV
9320static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
9321 size_t *sq_offset)
9322{
9323 struct io_rings *rings;
9324 size_t off, sq_array_size;
9325
9326 off = struct_size(rings, cqes, cq_entries);
9327 if (off == SIZE_MAX)
9328 return SIZE_MAX;
9329
9330#ifdef CONFIG_SMP
9331 off = ALIGN(off, SMP_CACHE_BYTES);
9332 if (off == 0)
9333 return SIZE_MAX;
9334#endif
9335
b36200f5
DV
9336 if (sq_offset)
9337 *sq_offset = off;
9338
75b28aff
HV
9339 sq_array_size = array_size(sizeof(u32), sq_entries);
9340 if (sq_array_size == SIZE_MAX)
9341 return SIZE_MAX;
9342
9343 if (check_add_overflow(off, sq_array_size, &off))
9344 return SIZE_MAX;
9345
75b28aff
HV
9346 return off;
9347}
9348
41edf1a5 9349static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
7f61a1e9 9350{
41edf1a5 9351 struct io_mapped_ubuf *imu = *slot;
7f61a1e9
PB
9352 unsigned int i;
9353
6224843d
PB
9354 if (imu != ctx->dummy_ubuf) {
9355 for (i = 0; i < imu->nr_bvecs; i++)
9356 unpin_user_page(imu->bvec[i].bv_page);
9357 if (imu->acct_pages)
9358 io_unaccount_mem(ctx, imu->acct_pages);
9359 kvfree(imu);
9360 }
41edf1a5 9361 *slot = NULL;
7f61a1e9
PB
9362}
9363
bd54b6fe 9364static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
edafccee 9365{
634d00df
PB
9366 io_buffer_unmap(ctx, &prsrc->buf);
9367 prsrc->buf = NULL;
bd54b6fe 9368}
edafccee 9369
bd54b6fe
BM
9370static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
9371{
9372 unsigned int i;
edafccee 9373
7f61a1e9
PB
9374 for (i = 0; i < ctx->nr_user_bufs; i++)
9375 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
edafccee 9376 kfree(ctx->user_bufs);
bb6659cc 9377 io_rsrc_data_free(ctx->buf_data);
edafccee 9378 ctx->user_bufs = NULL;
bd54b6fe 9379 ctx->buf_data = NULL;
edafccee 9380 ctx->nr_user_bufs = 0;
bd54b6fe
BM
9381}
9382
0a96bbe4 9383static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
edafccee 9384{
bd54b6fe 9385 int ret;
edafccee 9386
bd54b6fe 9387 if (!ctx->buf_data)
edafccee
JA
9388 return -ENXIO;
9389
bd54b6fe
BM
9390 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
9391 if (!ret)
9392 __io_sqe_buffers_unregister(ctx);
9393 return ret;
edafccee
JA
9394}
9395
9396static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
9397 void __user *arg, unsigned index)
9398{
9399 struct iovec __user *src;
9400
9401#ifdef CONFIG_COMPAT
9402 if (ctx->compat) {
9403 struct compat_iovec __user *ciovs;
9404 struct compat_iovec ciov;
9405
9406 ciovs = (struct compat_iovec __user *) arg;
9407 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
9408 return -EFAULT;
9409
d55e5f5b 9410 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
9411 dst->iov_len = ciov.iov_len;
9412 return 0;
9413 }
9414#endif
9415 src = (struct iovec __user *) arg;
9416 if (copy_from_user(dst, &src[index], sizeof(*dst)))
9417 return -EFAULT;
9418 return 0;
9419}
9420
de293938
JA
9421/*
9422 * Not super efficient, but this is only done at registration time. And we do cache
9423 * the last compound head, so generally we'll only do a full search if we don't
9424 * match that one.
9425 *
9426 * We check if the given compound head page has already been accounted, to
9427 * avoid double accounting it. This allows us to account the full size of the
9428 * page, not just the constituent pages of a huge page.
9429 */
9430static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
9431 int nr_pages, struct page *hpage)
9432{
9433 int i, j;
9434
9435 /* check current page array */
9436 for (i = 0; i < nr_pages; i++) {
9437 if (!PageCompound(pages[i]))
9438 continue;
9439 if (compound_head(pages[i]) == hpage)
9440 return true;
9441 }
9442
9443 /* check previously registered pages */
9444 for (i = 0; i < ctx->nr_user_bufs; i++) {
41edf1a5 9445 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
de293938
JA
9446
9447 for (j = 0; j < imu->nr_bvecs; j++) {
9448 if (!PageCompound(imu->bvec[j].bv_page))
9449 continue;
9450 if (compound_head(imu->bvec[j].bv_page) == hpage)
9451 return true;
9452 }
9453 }
9454
9455 return false;
9456}
9457
9458static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
9459 int nr_pages, struct io_mapped_ubuf *imu,
9460 struct page **last_hpage)
9461{
9462 int i, ret;
9463
216e5835 9464 imu->acct_pages = 0;
de293938
JA
9465 for (i = 0; i < nr_pages; i++) {
9466 if (!PageCompound(pages[i])) {
9467 imu->acct_pages++;
9468 } else {
9469 struct page *hpage;
9470
9471 hpage = compound_head(pages[i]);
9472 if (hpage == *last_hpage)
9473 continue;
9474 *last_hpage = hpage;
9475 if (headpage_already_acct(ctx, pages, i, hpage))
9476 continue;
9477 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
9478 }
9479 }
9480
9481 if (!imu->acct_pages)
9482 return 0;
9483
26bfa89e 9484 ret = io_account_mem(ctx, imu->acct_pages);
de293938
JA
9485 if (ret)
9486 imu->acct_pages = 0;
9487 return ret;
9488}
9489
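/*
 * Pin and map one user buffer: pin its pages (rejecting file-backed VMAs that
 * are neither shmem nor hugetlb), account them against the memlock limit and
 * build the bvec array in a freshly allocated io_mapped_ubuf. A NULL iov_base
 * installs the dummy buffer for a sparse slot.
 */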
0a96bbe4 9490static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
41edf1a5 9491 struct io_mapped_ubuf **pimu,
0a96bbe4 9492 struct page **last_hpage)
edafccee 9493{
41edf1a5 9494 struct io_mapped_ubuf *imu = NULL;
edafccee
JA
9495 struct vm_area_struct **vmas = NULL;
9496 struct page **pages = NULL;
0a96bbe4
BM
9497 unsigned long off, start, end, ubuf;
9498 size_t size;
9499 int ret, pret, nr_pages, i;
9500
6224843d
PB
9501 if (!iov->iov_base) {
9502 *pimu = ctx->dummy_ubuf;
9503 return 0;
9504 }
9505
0a96bbe4
BM
9506 ubuf = (unsigned long) iov->iov_base;
9507 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
9508 start = ubuf >> PAGE_SHIFT;
9509 nr_pages = end - start;
9510
41edf1a5 9511 *pimu = NULL;
0a96bbe4
BM
9512 ret = -ENOMEM;
9513
9514 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
9515 if (!pages)
9516 goto done;
9517
9518 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
9519 GFP_KERNEL);
9520 if (!vmas)
9521 goto done;
edafccee 9522
41edf1a5 9523 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
a2b4198c 9524 if (!imu)
0a96bbe4
BM
9525 goto done;
9526
9527 ret = 0;
9528 mmap_read_lock(current->mm);
9529 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
9530 pages, vmas);
9531 if (pret == nr_pages) {
9532 /* don't support file backed memory */
9533 for (i = 0; i < nr_pages; i++) {
9534 struct vm_area_struct *vma = vmas[i];
9535
40dad765
PB
9536 if (vma_is_shmem(vma))
9537 continue;
0a96bbe4
BM
9538 if (vma->vm_file &&
9539 !is_file_hugepages(vma->vm_file)) {
9540 ret = -EOPNOTSUPP;
9541 break;
9542 }
9543 }
9544 } else {
9545 ret = pret < 0 ? pret : -EFAULT;
9546 }
9547 mmap_read_unlock(current->mm);
9548 if (ret) {
9549 /*
9550 * if we did partial map, or found file backed vmas,
9551 * release any pages we did get
9552 */
9553 if (pret > 0)
9554 unpin_user_pages(pages, pret);
0a96bbe4
BM
9555 goto done;
9556 }
9557
9558 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
9559 if (ret) {
9560 unpin_user_pages(pages, pret);
0a96bbe4
BM
9561 goto done;
9562 }
9563
9564 off = ubuf & ~PAGE_MASK;
9565 size = iov->iov_len;
9566 for (i = 0; i < nr_pages; i++) {
9567 size_t vec_len;
9568
9569 vec_len = min_t(size_t, size, PAGE_SIZE - off);
9570 imu->bvec[i].bv_page = pages[i];
9571 imu->bvec[i].bv_len = vec_len;
9572 imu->bvec[i].bv_offset = off;
9573 off = 0;
9574 size -= vec_len;
9575 }
9576 /* store original address for later verification */
9577 imu->ubuf = ubuf;
4751f53d 9578 imu->ubuf_end = ubuf + iov->iov_len;
0a96bbe4 9579 imu->nr_bvecs = nr_pages;
41edf1a5 9580 *pimu = imu;
0a96bbe4
BM
9581 ret = 0;
9582done:
41edf1a5
PB
9583 if (ret)
9584 kvfree(imu);
0a96bbe4
BM
9585 kvfree(pages);
9586 kvfree(vmas);
9587 return ret;
9588}
9589
2b358604 9590static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
0a96bbe4 9591{
87094465
PB
9592 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
9593 return ctx->user_bufs ? 0 : -ENOMEM;
2b358604 9594}
edafccee 9595
2b358604
BM
9596static int io_buffer_validate(struct iovec *iov)
9597{
50e96989
PB
9598 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
9599
2b358604
BM
9600 /*
9601 * Don't impose further limits on the size and buffer
9602 * constraints here; we'll -EINVAL later when IO is
9603 * submitted if they are wrong.
9604 */
6224843d
PB
9605 if (!iov->iov_base)
9606 return iov->iov_len ? -EFAULT : 0;
9607 if (!iov->iov_len)
2b358604 9608 return -EFAULT;
edafccee 9609
2b358604
BM
9610 /* arbitrary limit, but we need something */
9611 if (iov->iov_len > SZ_1G)
9612 return -EFAULT;
edafccee 9613
50e96989
PB
9614 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
9615 return -EOVERFLOW;
9616
2b358604
BM
9617 return 0;
9618}
edafccee 9619
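/*
 * Register fixed buffers: allocate the rsrc data and user_bufs array,
 * validate, pin and map each iovec, then switch in a fresh rsrc node. On
 * error whatever was registered so far is torn down again.
 */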
2b358604 9620static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
634d00df 9621 unsigned int nr_args, u64 __user *tags)
2b358604 9622{
bd54b6fe
BM
9623 struct page *last_hpage = NULL;
9624 struct io_rsrc_data *data;
2b358604
BM
9625 int i, ret;
9626 struct iovec iov;
edafccee 9627
87094465
PB
9628 if (ctx->user_bufs)
9629 return -EBUSY;
489809e2 9630 if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
87094465 9631 return -EINVAL;
bd54b6fe 9632 ret = io_rsrc_node_switch_start(ctx);
2b358604
BM
9633 if (ret)
9634 return ret;
d878c816
PB
9635 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
9636 if (ret)
9637 return ret;
bd54b6fe
BM
9638 ret = io_buffers_map_alloc(ctx, nr_args);
9639 if (ret) {
bb6659cc 9640 io_rsrc_data_free(data);
bd54b6fe
BM
9641 return ret;
9642 }
edafccee 9643
87094465 9644 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
edafccee
JA
9645 ret = io_copy_iov(ctx, &iov, arg, i);
9646 if (ret)
0a96bbe4 9647 break;
2b358604
BM
9648 ret = io_buffer_validate(&iov);
9649 if (ret)
0a96bbe4 9650 break;
2d091d62 9651 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
cf3770e7
CIK
9652 ret = -EINVAL;
9653 break;
9654 }
edafccee 9655
41edf1a5
PB
9656 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
9657 &last_hpage);
0a96bbe4
BM
9658 if (ret)
9659 break;
edafccee 9660 }
0a96bbe4 9661
bd54b6fe 9662 WARN_ON_ONCE(ctx->buf_data);
0a96bbe4 9663
bd54b6fe
BM
9664 ctx->buf_data = data;
9665 if (ret)
9666 __io_sqe_buffers_unregister(ctx);
9667 else
9668 io_rsrc_node_switch(ctx, NULL);
edafccee
JA
9669 return ret;
9670}
9671
634d00df
PB
9672static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
9673 struct io_uring_rsrc_update2 *up,
9674 unsigned int nr_args)
9675{
9676 u64 __user *tags = u64_to_user_ptr(up->tags);
9677 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
634d00df
PB
9678 struct page *last_hpage = NULL;
9679 bool needs_switch = false;
9680 __u32 done;
9681 int i, err;
9682
9683 if (!ctx->buf_data)
9684 return -ENXIO;
9685 if (up->offset + nr_args > ctx->nr_user_bufs)
9686 return -EINVAL;
9687
9688 for (done = 0; done < nr_args; done++) {
0b8c0e7c
PB
9689 struct io_mapped_ubuf *imu;
9690 int offset = up->offset + done;
634d00df
PB
9691 u64 tag = 0;
9692
9693 err = io_copy_iov(ctx, &iov, iovs, done);
9694 if (err)
9695 break;
9696 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
9697 err = -EFAULT;
9698 break;
9699 }
0b8c0e7c
PB
9700 err = io_buffer_validate(&iov);
9701 if (err)
9702 break;
cf3770e7
CIK
9703 if (!iov.iov_base && tag) {
9704 err = -EINVAL;
9705 break;
9706 }
0b8c0e7c
PB
9707 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
9708 if (err)
9709 break;
634d00df 9710
0b8c0e7c 9711 i = array_index_nospec(offset, ctx->nr_user_bufs);
6224843d 9712 if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
4cdd158b 9713 err = io_queue_rsrc_removal(ctx->buf_data, i,
0b8c0e7c
PB
9714 ctx->rsrc_node, ctx->user_bufs[i]);
9715 if (unlikely(err)) {
9716 io_buffer_unmap(ctx, &imu);
634d00df 9717 break;
0b8c0e7c 9718 }
634d00df
PB
9719 ctx->user_bufs[i] = NULL;
9720 needs_switch = true;
9721 }
9722
0b8c0e7c 9723 ctx->user_bufs[i] = imu;
2d091d62 9724 *io_get_tag_slot(ctx->buf_data, offset) = tag;
634d00df
PB
9725 }
9726
9727 if (needs_switch)
9728 io_rsrc_node_switch(ctx, ctx->buf_data);
9729 return done ? done : err;
9730}
9731
c75312dd
UA
9732static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
9733 unsigned int eventfd_async)
9b402849 9734{
77bc59b4 9735 struct io_ev_fd *ev_fd;
9b402849 9736 __s32 __user *fds = arg;
f0a4e62b 9737 int fd;
9b402849 9738
77bc59b4
UA
9739 ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
9740 lockdep_is_held(&ctx->uring_lock));
9741 if (ev_fd)
9b402849
JA
9742 return -EBUSY;
9743
9744 if (copy_from_user(&fd, fds, sizeof(*fds)))
9745 return -EFAULT;
9746
77bc59b4
UA
9747 ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
9748 if (!ev_fd)
9749 return -ENOMEM;
fe7e3257 9750
77bc59b4
UA
9751 ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
9752 if (IS_ERR(ev_fd->cq_ev_fd)) {
f0a4e62b 9753 int ret = PTR_ERR(ev_fd->cq_ev_fd);
77bc59b4 9754 kfree(ev_fd);
9b402849
JA
9755 return ret;
9756 }
c75312dd 9757 ev_fd->eventfd_async = eventfd_async;
9aa8dfde 9758 ctx->has_evfd = true;
77bc59b4 9759 rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
f0a4e62b 9760 return 0;
77bc59b4
UA
9761}
9762
9763static void io_eventfd_put(struct rcu_head *rcu)
9764{
9765 struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
9766
9767 eventfd_ctx_put(ev_fd->cq_ev_fd);
9768 kfree(ev_fd);
9b402849
JA
9769}
9770
9771static int io_eventfd_unregister(struct io_ring_ctx *ctx)
9772{
77bc59b4
UA
9773 struct io_ev_fd *ev_fd;
9774
9775 ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
9776 lockdep_is_held(&ctx->uring_lock));
9777 if (ev_fd) {
9aa8dfde 9778 ctx->has_evfd = false;
77bc59b4
UA
9779 rcu_assign_pointer(ctx->io_ev_fd, NULL);
9780 call_rcu(&ev_fd->rcu, io_eventfd_put);
9b402849
JA
9781 return 0;
9782 }
9783
9784 return -ENXIO;
9785}
9786
5a2e745d
JA
9787static void io_destroy_buffers(struct io_ring_ctx *ctx)
9788{
dbc7d452
JA
9789 int i;
9790
9791 for (i = 0; i < (1U << IO_BUFFERS_HASH_BITS); i++) {
9792 struct list_head *list = &ctx->io_buffers[i];
9e15c3a0 9793
dbc7d452
JA
9794 while (!list_empty(list)) {
9795 struct io_buffer_list *bl;
9796
9797 bl = list_first_entry(list, struct io_buffer_list, list);
9798 __io_remove_buffers(ctx, bl, -1U);
9799 list_del(&bl->list);
9800 kfree(bl);
9801 }
9802 }
cc3cec83
JA
9803
9804 while (!list_empty(&ctx->io_buffers_pages)) {
9805 struct page *page;
9806
9807 page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
9808 list_del_init(&page->lru);
9809 __free_page(page);
9810 }
5a2e745d
JA
9811}
9812
4010fec4 9813static void io_req_caches_free(struct io_ring_ctx *ctx)
2b188cc1 9814{
cd0ca2e0 9815 struct io_submit_state *state = &ctx->submit_state;
37f0e767 9816 int nr = 0;
bf019da7 9817
9a4fdbd8 9818 mutex_lock(&ctx->uring_lock);
cd0ca2e0 9819 io_flush_cached_locked_reqs(ctx, state);
9a4fdbd8 9820
88ab95be 9821 while (!io_req_cache_empty(ctx)) {
c2b6c6bc
PB
9822 struct io_wq_work_node *node;
9823 struct io_kiocb *req;
9a4fdbd8 9824
c2b6c6bc
PB
9825 node = wq_stack_extract(&state->free_list);
9826 req = container_of(node, struct io_kiocb, comp_list);
9827 kmem_cache_free(req_cachep, req);
37f0e767 9828 nr++;
c2b6c6bc 9829 }
37f0e767
PB
9830 if (nr)
9831 percpu_ref_put_many(&ctx->refs, nr);
9a4fdbd8
JA
9832 mutex_unlock(&ctx->uring_lock);
9833}
9834
43597aac 9835static void io_wait_rsrc_data(struct io_rsrc_data *data)
2b188cc1 9836{
43597aac 9837 if (data && !atomic_dec_and_test(&data->refs))
bd54b6fe 9838 wait_for_completion(&data->done);
bd54b6fe 9839}
04fc6c80 9840
4d9237e3
JA
9841static void io_flush_apoll_cache(struct io_ring_ctx *ctx)
9842{
9843 struct async_poll *apoll;
9844
9845 while (!list_empty(&ctx->apoll_cache)) {
9846 apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
9847 poll.wait.entry);
9848 list_del(&apoll->poll.wait.entry);
9849 kfree(apoll);
9850 }
9851}
9852
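/*
 * Final destruction of a ring context once all references are gone: stop the
 * SQPOLL thread, unregister buffers, files and the eventfd, drop caches and
 * the ring/SQE memory, and release the UNIX socket backing the ring fd (if
 * any).
 */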
c072481d 9853static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
2b188cc1 9854{
37d1e2e3 9855 io_sq_thread_finish(ctx);
2aede0e4 9856
37d1e2e3 9857 if (ctx->mm_account) {
2aede0e4
JA
9858 mmdrop(ctx->mm_account);
9859 ctx->mm_account = NULL;
30975825 9860 }
def596e9 9861
ab409402 9862 io_rsrc_refs_drop(ctx);
43597aac
PB
9863 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
9864 io_wait_rsrc_data(ctx->buf_data);
9865 io_wait_rsrc_data(ctx->file_data);
9866
8bad28d8 9867 mutex_lock(&ctx->uring_lock);
43597aac 9868 if (ctx->buf_data)
bd54b6fe 9869 __io_sqe_buffers_unregister(ctx);
43597aac 9870 if (ctx->file_data)
08480400 9871 __io_sqe_files_unregister(ctx);
c4ea060e
PB
9872 if (ctx->rings)
9873 __io_cqring_overflow_flush(ctx, true);
9b402849 9874 io_eventfd_unregister(ctx);
4d9237e3 9875 io_flush_apoll_cache(ctx);
77bc59b4 9876 mutex_unlock(&ctx->uring_lock);
5a2e745d 9877 io_destroy_buffers(ctx);
07db298a
PB
9878 if (ctx->sq_creds)
9879 put_cred(ctx->sq_creds);
def596e9 9880
a7f0ed5a
PB
9881 /* there are no registered resources left, nobody uses it */
9882 if (ctx->rsrc_node)
9883 io_rsrc_node_destroy(ctx->rsrc_node);
8dd03afe 9884 if (ctx->rsrc_backup_node)
b895c9a6 9885 io_rsrc_node_destroy(ctx->rsrc_backup_node);
a7f0ed5a 9886 flush_delayed_work(&ctx->rsrc_put_work);
756ab7c0 9887 flush_delayed_work(&ctx->fallback_work);
a7f0ed5a
PB
9888
9889 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
9890 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
def596e9 9891
2b188cc1 9892#if defined(CONFIG_UNIX)
355e8d26
EB
9893 if (ctx->ring_sock) {
9894 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 9895 sock_release(ctx->ring_sock);
355e8d26 9896 }
2b188cc1 9897#endif
ef9dd637 9898 WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
2b188cc1 9899
75b28aff 9900 io_mem_free(ctx->rings);
2b188cc1 9901 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
9902
9903 percpu_ref_exit(&ctx->refs);
2b188cc1 9904 free_uid(ctx->user);
4010fec4 9905 io_req_caches_free(ctx);
e941894e
JA
9906 if (ctx->hash_map)
9907 io_wq_put_hash(ctx->hash_map);
78076bb6 9908 kfree(ctx->cancel_hash);
6224843d 9909 kfree(ctx->dummy_ubuf);
dbc7d452 9910 kfree(ctx->io_buffers);
2b188cc1
JA
9911 kfree(ctx);
9912}
9913
9914static __poll_t io_uring_poll(struct file *file, poll_table *wait)
9915{
9916 struct io_ring_ctx *ctx = file->private_data;
9917 __poll_t mask = 0;
9918
d60aa65b 9919 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
9920 /*
9921 * synchronizes with barrier from wq_has_sleeper call in
9922 * io_commit_cqring
9923 */
2b188cc1 9924 smp_rmb();
90554200 9925 if (!io_sqring_full(ctx))
2b188cc1 9926 mask |= EPOLLOUT | EPOLLWRNORM;
ed670c3f
HX
9927
9928 /*
9929 * Don't flush cqring overflow list here, just do a simple check.
9930 * Otherwise there could possibly be an ABBA deadlock:
9931 * CPU0 CPU1
9932 * ---- ----
9933 * lock(&ctx->uring_lock);
9934 * lock(&ep->mtx);
9935 * lock(&ctx->uring_lock);
9936 * lock(&ep->mtx);
9937 *
9938 * Users may get EPOLLIN while seeing nothing in the cqring; this
9939 * pushes them to do the flush.
9940 */
5ed7a37d 9941 if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
2b188cc1
JA
9942 mask |= EPOLLIN | EPOLLRDNORM;
9943
9944 return mask;
9945}
9946
0bead8cd 9947static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
071698e1 9948{
4379bf8b 9949 const struct cred *creds;
071698e1 9950
61cf9370 9951 creds = xa_erase(&ctx->personalities, id);
4379bf8b
JA
9952 if (creds) {
9953 put_cred(creds);
0bead8cd 9954 return 0;
1e6fa521 9955 }
0bead8cd
YD
9956
9957 return -EINVAL;
9958}
9959
d56d938b
PB
9960struct io_tctx_exit {
9961 struct callback_head task_work;
9962 struct completion completion;
baf186c4 9963 struct io_ring_ctx *ctx;
d56d938b
PB
9964};
9965
c072481d 9966static __cold void io_tctx_exit_cb(struct callback_head *cb)
d56d938b
PB
9967{
9968 struct io_uring_task *tctx = current->io_uring;
9969 struct io_tctx_exit *work;
9970
9971 work = container_of(cb, struct io_tctx_exit, task_work);
9972 /*
9973 * When @in_idle, we're in cancellation and it's racy to remove the
9974 * node. It'll be removed by the end of cancellation, just ignore it.
9975 */
9976 if (!atomic_read(&tctx->in_idle))
eef51daa 9977 io_uring_del_tctx_node((unsigned long)work->ctx);
d56d938b
PB
9978 complete(&work->completion);
9979}
9980
c072481d 9981static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
28090c13
PB
9982{
9983 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
9984
9985 return req->ctx == data;
9986}
9987
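/*
 * Workqueue handler driving ring teardown: repeatedly cancel outstanding
 * requests (including io-wq work of an SQPOLL thread) until the ctx refs
 * drop, then walk ->tctx_list and use task_work to detach every task still
 * referencing the ring before finally freeing the context.
 */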
c072481d 9988static __cold void io_ring_exit_work(struct work_struct *work)
85faa7b8 9989{
d56d938b 9990 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
b5bb3a24 9991 unsigned long timeout = jiffies + HZ * 60 * 5;
58d3be2c 9992 unsigned long interval = HZ / 20;
d56d938b
PB
9993 struct io_tctx_exit exit;
9994 struct io_tctx_node *node;
9995 int ret;
85faa7b8 9996
56952e91
JA
9997 /*
9998 * If we're doing polled IO and end up having requests being
9999 * submitted async (out-of-line), then completions can come in while
10000 * we're waiting for refs to drop. We need to reap these manually,
10001 * as nobody else will be looking for them.
10002 */
b2edc0a7 10003 do {
3dd0c97a 10004 io_uring_try_cancel_requests(ctx, NULL, true);
28090c13
PB
10005 if (ctx->sq_data) {
10006 struct io_sq_data *sqd = ctx->sq_data;
10007 struct task_struct *tsk;
10008
10009 io_sq_thread_park(sqd);
10010 tsk = sqd->thread;
10011 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
10012 io_wq_cancel_cb(tsk->io_uring->io_wq,
10013 io_cancel_ctx_cb, ctx, true);
10014 io_sq_thread_unpark(sqd);
10015 }
b5bb3a24 10016
37f0e767
PB
10017 io_req_caches_free(ctx);
10018
58d3be2c
PB
10019 if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
10020 /* there is little hope left, don't run it too often */
10021 interval = HZ * 60;
10022 }
10023 } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
d56d938b 10024
7f00651a
PB
10025 init_completion(&exit.completion);
10026 init_task_work(&exit.task_work, io_tctx_exit_cb);
10027 exit.ctx = ctx;
89b5066e
PB
10028 /*
10029 * Some may use context even when all refs and requests have been put,
10030 * and they are free to do so while still holding uring_lock or
5b0a6acc 10031 * completion_lock, see io_req_task_submit(). Apart from other work,
89b5066e
PB
10032 * this lock/unlock section also waits for them to finish.
10033 */
d56d938b
PB
10034 mutex_lock(&ctx->uring_lock);
10035 while (!list_empty(&ctx->tctx_list)) {
b5bb3a24
PB
10036 WARN_ON_ONCE(time_after(jiffies, timeout));
10037
d56d938b
PB
10038 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
10039 ctx_node);
7f00651a
PB
10040 /* don't spin on a single task if cancellation failed */
10041 list_rotate_left(&ctx->tctx_list);
d56d938b
PB
10042 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
10043 if (WARN_ON_ONCE(ret))
10044 continue;
d56d938b
PB
10045
10046 mutex_unlock(&ctx->uring_lock);
10047 wait_for_completion(&exit.completion);
d56d938b
PB
10048 mutex_lock(&ctx->uring_lock);
10049 }
10050 mutex_unlock(&ctx->uring_lock);
79ebeaee
JA
10051 spin_lock(&ctx->completion_lock);
10052 spin_unlock(&ctx->completion_lock);
d56d938b 10053
85faa7b8
JA
10054 io_ring_ctx_free(ctx);
10055}
10056
80c4cbdb 10057/* Returns true if we found and killed one or more timeouts */
c072481d
PB
10058static __cold bool io_kill_timeouts(struct io_ring_ctx *ctx,
10059 struct task_struct *tsk, bool cancel_all)
80c4cbdb
PB
10060{
10061 struct io_kiocb *req, *tmp;
10062 int canceled = 0;
10063
79ebeaee
JA
10064 spin_lock(&ctx->completion_lock);
10065 spin_lock_irq(&ctx->timeout_lock);
80c4cbdb 10066 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
3dd0c97a 10067 if (io_match_task(req, tsk, cancel_all)) {
80c4cbdb
PB
10068 io_kill_timeout(req, -ECANCELED);
10069 canceled++;
10070 }
10071 }
79ebeaee 10072 spin_unlock_irq(&ctx->timeout_lock);
60053be8 10073 io_commit_cqring(ctx);
79ebeaee 10074 spin_unlock(&ctx->completion_lock);
80c4cbdb
PB
10075 if (canceled != 0)
10076 io_cqring_ev_posted(ctx);
10077 return canceled != 0;
10078}
10079
c072481d 10080static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
2b188cc1 10081{
61cf9370
MWO
10082 unsigned long index;
10083 struct creds *creds;
10084
2b188cc1
JA
10085 mutex_lock(&ctx->uring_lock);
10086 percpu_ref_kill(&ctx->refs);
634578f8 10087 if (ctx->rings)
6c2450ae 10088 __io_cqring_overflow_flush(ctx, true);
61cf9370
MWO
10089 xa_for_each(&ctx->personalities, index, creds)
10090 io_unregister_personality(ctx, index);
2b188cc1
JA
10091 mutex_unlock(&ctx->uring_lock);
10092
60053be8
PB
10093 /* failed during ring init, it couldn't have issued any requests */
10094 if (ctx->rings) {
10095 io_kill_timeouts(ctx, NULL, true);
10096 io_poll_remove_all(ctx, NULL, true);
10097 /* if we failed setting up the ctx, we might not have any rings */
10098 io_iopoll_try_reap_events(ctx);
10099 }
309fc03a 10100
85faa7b8 10101 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
fc666777
JA
10102 /*
10103 * Use system_unbound_wq to avoid spawning tons of event kworkers
10104 * if we're exiting a ton of rings at the same time. It just adds
10105 * noise and overhead; there's no discernible change in runtime
10106 * over using system_wq.
10107 */
10108 queue_work(system_unbound_wq, &ctx->exit_work);
2b188cc1
JA
10109}
10110
10111static int io_uring_release(struct inode *inode, struct file *file)
10112{
10113 struct io_ring_ctx *ctx = file->private_data;
10114
10115 file->private_data = NULL;
10116 io_ring_ctx_wait_and_kill(ctx);
10117 return 0;
10118}
10119
f6edbabb
PB
10120struct io_task_cancel {
10121 struct task_struct *task;
3dd0c97a 10122 bool all;
f6edbabb 10123};
f254ac04 10124
f6edbabb 10125static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
b711d4ea 10126{
9a472ef7 10127 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
f6edbabb 10128 struct io_task_cancel *cancel = data;
9a472ef7 10129
6af3f48b 10130 return io_match_task_safe(req, cancel->task, cancel->all);
b711d4ea
JA
10131}
10132
c072481d
PB
10133static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
10134 struct task_struct *task,
10135 bool cancel_all)
b7ddce3c 10136{
e1915f76 10137 struct io_defer_entry *de;
b7ddce3c
PB
10138 LIST_HEAD(list);
10139
79ebeaee 10140 spin_lock(&ctx->completion_lock);
b7ddce3c 10141 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
6af3f48b 10142 if (io_match_task_safe(de->req, task, cancel_all)) {
b7ddce3c
PB
10143 list_cut_position(&list, &ctx->defer_list, &de->list);
10144 break;
10145 }
10146 }
79ebeaee 10147 spin_unlock(&ctx->completion_lock);
e1915f76
PB
10148 if (list_empty(&list))
10149 return false;
b7ddce3c
PB
10150
10151 while (!list_empty(&list)) {
10152 de = list_first_entry(&list, struct io_defer_entry, list);
10153 list_del_init(&de->list);
f41db273 10154 io_req_complete_failed(de->req, -ECANCELED);
b7ddce3c
PB
10155 kfree(de);
10156 }
e1915f76 10157 return true;
b7ddce3c
PB
10158}
10159
c072481d 10160static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
1b00764f
PB
10161{
10162 struct io_tctx_node *node;
10163 enum io_wq_cancel cret;
10164 bool ret = false;
10165
10166 mutex_lock(&ctx->uring_lock);
10167 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
10168 struct io_uring_task *tctx = node->task->io_uring;
10169
10170 /*
10171 * io_wq will stay alive while we hold uring_lock, because it's
10172 * killed after ctx nodes, which requires to take the lock.
10173 */
10174 if (!tctx || !tctx->io_wq)
10175 continue;
10176 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
10177 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
10178 }
10179 mutex_unlock(&ctx->uring_lock);
10180
10181 return ret;
10182}
10183
c072481d
PB
10184static __cold void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
10185 struct task_struct *task,
10186 bool cancel_all)
9936c7c2 10187{
3dd0c97a 10188 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
1b00764f 10189 struct io_uring_task *tctx = task ? task->io_uring : NULL;
9936c7c2 10190
60053be8
PB
10191 /* failed during ring init, it couldn't have issued any requests */
10192 if (!ctx->rings)
10193 return;
10194
9936c7c2
PB
10195 while (1) {
10196 enum io_wq_cancel cret;
10197 bool ret = false;
10198
1b00764f
PB
10199 if (!task) {
10200 ret |= io_uring_try_cancel_iowq(ctx);
10201 } else if (tctx && tctx->io_wq) {
10202 /*
10203 * Cancels requests of all rings, not only @ctx, but
10204 * it's fine as the task is in exit/exec.
10205 */
5aa75ed5 10206 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
9936c7c2
PB
10207 &cancel, true);
10208 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
10209 }
10210
10211 /* SQPOLL thread does its own polling */
3dd0c97a 10212 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
d052d1d6 10213 (ctx->sq_data && ctx->sq_data->thread == current)) {
5eef4e87 10214 while (!wq_list_empty(&ctx->iopoll_list)) {
9936c7c2
PB
10215 io_iopoll_try_reap_events(ctx);
10216 ret = true;
10217 }
10218 }
10219
3dd0c97a
PB
10220 ret |= io_cancel_defer_files(ctx, task, cancel_all);
10221 ret |= io_poll_remove_all(ctx, task, cancel_all);
10222 ret |= io_kill_timeouts(ctx, task, cancel_all);
e5dc480d
PB
10223 if (task)
10224 ret |= io_run_task_work();
9936c7c2
PB
10225 if (!ret)
10226 break;
10227 cond_resched();
10228 }
10229}
10230
eef51daa 10231static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
0f212204 10232{
236434c3 10233 struct io_uring_task *tctx = current->io_uring;
13bf43f5 10234 struct io_tctx_node *node;
a528b04e 10235 int ret;
236434c3
MWO
10236
10237 if (unlikely(!tctx)) {
5aa75ed5 10238 ret = io_uring_alloc_task_context(current, ctx);
0f212204
JA
10239 if (unlikely(ret))
10240 return ret;
e139a1ec 10241
236434c3 10242 tctx = current->io_uring;
e139a1ec
PB
10243 if (ctx->iowq_limits_set) {
10244 unsigned int limits[2] = { ctx->iowq_limits[0],
10245 ctx->iowq_limits[1], };
10246
10247 ret = io_wq_max_workers(tctx->io_wq, limits);
10248 if (ret)
10249 return ret;
10250 }
0f212204 10251 }
cf27f3b1
PB
10252 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
10253 node = kmalloc(sizeof(*node), GFP_KERNEL);
10254 if (!node)
10255 return -ENOMEM;
10256 node->ctx = ctx;
10257 node->task = current;
13bf43f5 10258
cf27f3b1
PB
10259 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
10260 node, GFP_KERNEL));
10261 if (ret) {
10262 kfree(node);
10263 return ret;
0f212204 10264 }
cf27f3b1
PB
10265
10266 mutex_lock(&ctx->uring_lock);
10267 list_add(&node->ctx_node, &ctx->tctx_list);
10268 mutex_unlock(&ctx->uring_lock);
0f212204 10269 }
cf27f3b1 10270 tctx->last = ctx;
0f212204
JA
10271 return 0;
10272}
10273
cf27f3b1
PB
10274/*
10275 * Note that this task has used io_uring. We use it for cancelation purposes.
10276 */
eef51daa 10277static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
cf27f3b1
PB
10278{
10279 struct io_uring_task *tctx = current->io_uring;
10280
10281 if (likely(tctx && tctx->last == ctx))
10282 return 0;
eef51daa 10283 return __io_uring_add_tctx_node(ctx);
cf27f3b1
PB
10284}
10285
0f212204
JA
10286/*
10287 * Remove this io_uring_file -> task mapping.
10288 */
c072481d 10289static __cold void io_uring_del_tctx_node(unsigned long index)
0f212204
JA
10290{
10291 struct io_uring_task *tctx = current->io_uring;
13bf43f5 10292 struct io_tctx_node *node;
2941267b 10293
eebd2e37
PB
10294 if (!tctx)
10295 return;
13bf43f5
PB
10296 node = xa_erase(&tctx->xa, index);
10297 if (!node)
2941267b 10298 return;
0f212204 10299
13bf43f5
PB
10300 WARN_ON_ONCE(current != node->task);
10301 WARN_ON_ONCE(list_empty(&node->ctx_node));
10302
10303 mutex_lock(&node->ctx->uring_lock);
10304 list_del(&node->ctx_node);
10305 mutex_unlock(&node->ctx->uring_lock);
10306
baf186c4 10307 if (tctx->last == node->ctx)
0f212204 10308 tctx->last = NULL;
13bf43f5 10309 kfree(node);
0f212204
JA
10310}
10311
c072481d 10312static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
de7f1d9e 10313{
ba5ef6dc 10314 struct io_wq *wq = tctx->io_wq;
13bf43f5 10315 struct io_tctx_node *node;
de7f1d9e
PB
10316 unsigned long index;
10317
8bab4c09 10318 xa_for_each(&tctx->xa, index, node) {
eef51daa 10319 io_uring_del_tctx_node(index);
8bab4c09
JA
10320 cond_resched();
10321 }
b16ef427
ME
10322 if (wq) {
10323 /*
f6f9b278 10324 * Must be after io_uring_del_tctx_node() (removes nodes under
b16ef427
ME
10325 * uring_lock) to avoid a race with io_uring_try_cancel_iowq().
10326 */
ba5ef6dc 10327 io_wq_put_and_exit(wq);
dadebc35 10328 tctx->io_wq = NULL;
b16ef427 10329 }
de7f1d9e
PB
10330}
10331
3f48cf18 10332static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
521d6a73 10333{
3f48cf18 10334 if (tracked)
d5361233 10335 return 0;
521d6a73
PB
10336 return percpu_counter_sum(&tctx->inflight);
10337}
10338
78cc687b
PB
10339/*
10340 * Find any io_uring ctx that this task has registered or done IO on, and cancel
78a78060 10341 * requests. @sqd must be non-NULL iff this is an SQPOLL thread cancellation.
78cc687b 10342 */
c072481d
PB
10343static __cold void io_uring_cancel_generic(bool cancel_all,
10344 struct io_sq_data *sqd)
0e9ddb39 10345{
521d6a73 10346 struct io_uring_task *tctx = current->io_uring;
734551df 10347 struct io_ring_ctx *ctx;
0e9ddb39
PB
10348 s64 inflight;
10349 DEFINE_WAIT(wait);
fdaf083c 10350
78cc687b
PB
10351 WARN_ON_ONCE(sqd && sqd->thread != current);
10352
6d042ffb
PO
10353 if (!current->io_uring)
10354 return;
17a91051
PB
10355 if (tctx->io_wq)
10356 io_wq_exit_start(tctx->io_wq);
10357
0e9ddb39
PB
10358 atomic_inc(&tctx->in_idle);
10359 do {
e9dbe221 10360 io_uring_drop_tctx_refs(current);
0e9ddb39 10361 /* read completions before cancelations */
78cc687b 10362 inflight = tctx_inflight(tctx, !cancel_all);
0e9ddb39
PB
10363 if (!inflight)
10364 break;
fdaf083c 10365
78cc687b
PB
10366 if (!sqd) {
10367 struct io_tctx_node *node;
10368 unsigned long index;
0f212204 10369
78cc687b
PB
10370 xa_for_each(&tctx->xa, index, node) {
10371 /* sqpoll task will cancel all its requests */
10372 if (node->ctx->sq_data)
10373 continue;
10374 io_uring_try_cancel_requests(node->ctx, current,
10375 cancel_all);
10376 }
10377 } else {
10378 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
10379 io_uring_try_cancel_requests(ctx, current,
10380 cancel_all);
10381 }
17a91051 10382
78a78060
JA
10383 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
10384 io_run_task_work();
e9dbe221 10385 io_uring_drop_tctx_refs(current);
78a78060 10386
0f212204 10387 /*
a1bb3cd5
PB
10388 * If we've seen completions, retry without waiting. This
10389 * avoids a race where a completion comes in before we did
10390 * prepare_to_wait().
0f212204 10391 */
3dd0c97a 10392 if (inflight == tctx_inflight(tctx, !cancel_all))
a1bb3cd5 10393 schedule();
f57555ed 10394 finish_wait(&tctx->wait, &wait);
d8a6df10 10395 } while (1);
de7f1d9e 10396
8452d4a6 10397 io_uring_clean_tctx(tctx);
3dd0c97a 10398 if (cancel_all) {
3cc7fdb9
PB
10399 /*
10400 * We shouldn't run task_works after cancel, so just leave
10401 * ->in_idle set for normal exit.
10402 */
10403 atomic_dec(&tctx->in_idle);
3f48cf18
PB
10404 /* for exec all current's requests should be gone, kill tctx */
10405 __io_uring_free(current);
10406 }
44e728b8
PB
10407}
10408
f552a27a 10409void __io_uring_cancel(bool cancel_all)
78cc687b 10410{
f552a27a 10411 io_uring_cancel_generic(cancel_all, NULL);
78cc687b
PB
10412}
10413
e7a6c00d
JA
10414void io_uring_unreg_ringfd(void)
10415{
10416 struct io_uring_task *tctx = current->io_uring;
10417 int i;
10418
10419 for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
10420 if (tctx->registered_rings[i]) {
10421 fput(tctx->registered_rings[i]);
10422 tctx->registered_rings[i] = NULL;
10423 }
10424 }
10425}
10426
10427static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
10428 int start, int end)
10429{
10430 struct file *file;
10431 int offset;
10432
10433 for (offset = start; offset < end; offset++) {
10434 offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
10435 if (tctx->registered_rings[offset])
10436 continue;
10437
10438 file = fget(fd);
10439 if (!file) {
10440 return -EBADF;
10441 } else if (file->f_op != &io_uring_fops) {
10442 fput(file);
10443 return -EOPNOTSUPP;
10444 }
10445 tctx->registered_rings[offset] = file;
10446 return offset;
10447 }
10448
10449 return -EBUSY;
10450}
10451
10452/*
10453 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
10454 * invocation. User passes in an array of struct io_uring_rsrc_update
10455 * with ->data set to the ring_fd, and ->offset given for the desired
10456 * index. If no index is desired, application may set ->offset == -1U
10457 * and we'll find an available index. Returns number of entries
10458 * successfully processed, or < 0 on error if none were processed.
10459 */
10460static int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
10461 unsigned nr_args)
10462{
10463 struct io_uring_rsrc_update __user *arg = __arg;
10464 struct io_uring_rsrc_update reg;
10465 struct io_uring_task *tctx;
10466 int ret, i;
10467
10468 if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
10469 return -EINVAL;
10470
10471 mutex_unlock(&ctx->uring_lock);
10472 ret = io_uring_add_tctx_node(ctx);
10473 mutex_lock(&ctx->uring_lock);
10474 if (ret)
10475 return ret;
10476
10477 tctx = current->io_uring;
10478 for (i = 0; i < nr_args; i++) {
10479 int start, end;
10480
10481 if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
10482 ret = -EFAULT;
10483 break;
10484 }
10485
6fb53cf8
DY
10486 if (reg.resv) {
10487 ret = -EINVAL;
10488 break;
10489 }
10490
e7a6c00d
JA
10491 if (reg.offset == -1U) {
10492 start = 0;
10493 end = IO_RINGFD_REG_MAX;
10494 } else {
10495 if (reg.offset >= IO_RINGFD_REG_MAX) {
10496 ret = -EINVAL;
10497 break;
10498 }
10499 start = reg.offset;
10500 end = start + 1;
10501 }
10502
10503 ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
10504 if (ret < 0)
10505 break;
10506
10507 reg.offset = ret;
10508 if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
10509 fput(tctx->registered_rings[reg.offset]);
10510 tctx->registered_rings[reg.offset] = NULL;
10511 ret = -EFAULT;
10512 break;
10513 }
10514 }
10515
10516 return i ? i : ret;
10517}
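
To make the registration flow described above concrete, here is a minimal userspace sketch. It is not part of this file; the helper name register_ring_fd() is purely illustrative, and it assumes a libc that exposes __NR_io_uring_register plus the uapi definitions from <linux/io_uring.h>.

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Register ring_fd at any free slot; returns the slot index or -1 (errno set). */
static int register_ring_fd(int ring_fd)
{
	struct io_uring_rsrc_update reg;

	memset(&reg, 0, sizeof(reg));
	reg.data = ring_fd;	/* ->data carries the real ring fd */
	reg.offset = -1U;	/* -1U: let the kernel pick a free index */

	/* The syscall returns the number of entries processed (1 here). */
	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_RING_FDS,
		    &reg, 1) < 0)
		return -1;
	return reg.offset;	/* the kernel wrote back the chosen index */
}

The returned index is then passed as the fd argument to io_uring_enter() together with IORING_ENTER_REGISTERED_RING, which is exactly the lookup path handled in io_uring_enter() further down.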
10518
10519static int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
10520 unsigned nr_args)
10521{
10522 struct io_uring_rsrc_update __user *arg = __arg;
10523 struct io_uring_task *tctx = current->io_uring;
10524 struct io_uring_rsrc_update reg;
10525 int ret = 0, i;
10526
10527 if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
10528 return -EINVAL;
10529 if (!tctx)
10530 return 0;
10531
10532 for (i = 0; i < nr_args; i++) {
10533 if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
10534 ret = -EFAULT;
10535 break;
10536 }
6fb53cf8 10537 if (reg.resv || reg.offset >= IO_RINGFD_REG_MAX) {
e7a6c00d
JA
10538 ret = -EINVAL;
10539 break;
10540 }
10541
10542 reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
10543 if (tctx->registered_rings[reg.offset]) {
10544 fput(tctx->registered_rings[reg.offset]);
10545 tctx->registered_rings[reg.offset] = NULL;
10546 }
10547 }
10548
10549 return i ? i : ret;
10550}
10551
6c5c240e
RP
10552static void *io_uring_validate_mmap_request(struct file *file,
10553 loff_t pgoff, size_t sz)
2b188cc1 10554{
2b188cc1 10555 struct io_ring_ctx *ctx = file->private_data;
6c5c240e 10556 loff_t offset = pgoff << PAGE_SHIFT;
2b188cc1
JA
10557 struct page *page;
10558 void *ptr;
10559
10560 switch (offset) {
10561 case IORING_OFF_SQ_RING:
75b28aff
HV
10562 case IORING_OFF_CQ_RING:
10563 ptr = ctx->rings;
2b188cc1
JA
10564 break;
10565 case IORING_OFF_SQES:
10566 ptr = ctx->sq_sqes;
10567 break;
2b188cc1 10568 default:
6c5c240e 10569 return ERR_PTR(-EINVAL);
2b188cc1
JA
10570 }
10571
10572 page = virt_to_head_page(ptr);
a50b854e 10573 if (sz > page_size(page))
6c5c240e
RP
10574 return ERR_PTR(-EINVAL);
10575
10576 return ptr;
10577}
10578
10579#ifdef CONFIG_MMU
10580
c072481d 10581static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
6c5c240e
RP
10582{
10583 size_t sz = vma->vm_end - vma->vm_start;
10584 unsigned long pfn;
10585 void *ptr;
10586
10587 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
10588 if (IS_ERR(ptr))
10589 return PTR_ERR(ptr);
2b188cc1
JA
10590
10591 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
10592 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
10593}
10594
6c5c240e
RP
10595#else /* !CONFIG_MMU */
10596
10597static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
10598{
10599 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
10600}
10601
10602static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
10603{
10604 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
10605}
10606
10607static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
10608 unsigned long addr, unsigned long len,
10609 unsigned long pgoff, unsigned long flags)
10610{
10611 void *ptr;
10612
10613 ptr = io_uring_validate_mmap_request(file, pgoff, len);
10614 if (IS_ERR(ptr))
10615 return PTR_ERR(ptr);
10616
10617 return (unsigned long) ptr;
10618}
10619
10620#endif /* !CONFIG_MMU */
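
For context on what these mmap offsets serve, here is a rough userspace sketch of mapping the rings. It is not from this file, map_rings() is an illustrative name, and it assumes the kernel advertised IORING_FEAT_SINGLE_MMAP and that p was filled in by io_uring_setup().

#include <linux/io_uring.h>
#include <sys/mman.h>

/* Map the SQ/CQ rings and the SQE array of an io_uring fd. */
static void *map_rings(int ring_fd, struct io_uring_params *p, void **sqes)
{
	size_t sq_sz = p->sq_off.array + p->sq_entries * sizeof(__u32);
	size_t cq_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
	size_t ring_sz = sq_sz > cq_sz ? sq_sz : cq_sz;
	void *rings;

	/* With IORING_FEAT_SINGLE_MMAP the SQ and CQ rings share one mapping. */
	rings = mmap(NULL, ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
		     ring_fd, IORING_OFF_SQ_RING);
	if (rings == MAP_FAILED)
		return MAP_FAILED;

	/* The SQE array lives behind its own offset, IORING_OFF_SQES. */
	*sqes = mmap(NULL, p->sq_entries * sizeof(struct io_uring_sqe),
		     PROT_READ | PROT_WRITE, MAP_SHARED, ring_fd,
		     IORING_OFF_SQES);
	return rings;
}

Both mappings land back in io_uring_validate_mmap_request() above, which accepts only these offsets and rejects sizes larger than the backing allocation.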
10621
d9d05217 10622static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
90554200
JA
10623{
10624 DEFINE_WAIT(wait);
10625
10626 do {
10627 if (!io_sqring_full(ctx))
10628 break;
90554200
JA
10629 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
10630
10631 if (!io_sqring_full(ctx))
10632 break;
90554200
JA
10633 schedule();
10634 } while (!signal_pending(current));
10635
10636 finish_wait(&ctx->sqo_sq_wait, &wait);
5199328a 10637 return 0;
90554200
JA
10638}
10639
f81440d3
PB
10640static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
10641{
10642 if (flags & IORING_ENTER_EXT_ARG) {
10643 struct io_uring_getevents_arg arg;
10644
10645 if (argsz != sizeof(arg))
10646 return -EINVAL;
10647 if (copy_from_user(&arg, argp, sizeof(arg)))
10648 return -EFAULT;
10649 }
10650 return 0;
10651}
10652
c73ebb68
HX
10653static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
10654 struct __kernel_timespec __user **ts,
10655 const sigset_t __user **sig)
10656{
10657 struct io_uring_getevents_arg arg;
10658
10659 /*
10660 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
10661 * is just a pointer to the sigset_t.
10662 */
10663 if (!(flags & IORING_ENTER_EXT_ARG)) {
10664 *sig = (const sigset_t __user *) argp;
10665 *ts = NULL;
10666 return 0;
10667 }
10668
10669 /*
10670 * EXT_ARG is set - ensure we agree on the size of it and copy in our
10671 * timespec and sigset_t pointers if everything checks out.
10672 */
10673 if (*argsz != sizeof(arg))
10674 return -EINVAL;
10675 if (copy_from_user(&arg, argp, sizeof(arg)))
10676 return -EFAULT;
d2347b96
DY
10677 if (arg.pad)
10678 return -EINVAL;
c73ebb68
HX
10679 *sig = u64_to_user_ptr(arg.sigmask);
10680 *argsz = arg.sigmask_sz;
10681 *ts = u64_to_user_ptr(arg.ts);
10682 return 0;
10683}
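
As a userspace counterpart to io_get_ext_arg(), here is a sketch of a timed wait using the extended argument. wait_cqe_timeout() is an illustrative name, and the kernel must advertise IORING_FEAT_EXT_ARG for the flag to be accepted.

#include <linux/io_uring.h>
#include <linux/time_types.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Wait for at least one CQE, but give up after one second. */
static int wait_cqe_timeout(int ring_fd)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_getevents_arg arg;

	memset(&arg, 0, sizeof(arg));		/* no sigmask, pad must be 0 */
	arg.ts = (__u64)(uintptr_t)&ts;

	/* With EXT_ARG, argp points at the struct and argsz is its size. */
	return syscall(__NR_io_uring_enter, ring_fd, 0, 1,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
		       &arg, sizeof(arg));
}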
10684
2b188cc1 10685SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
c73ebb68
HX
10686 u32, min_complete, u32, flags, const void __user *, argp,
10687 size_t, argsz)
2b188cc1
JA
10688{
10689 struct io_ring_ctx *ctx;
2b188cc1
JA
10690 int submitted = 0;
10691 struct fd f;
33f993da 10692 long ret;
2b188cc1 10693
4c6e277c 10694 io_run_task_work();
b41e9852 10695
33f993da 10696 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
e7a6c00d
JA
10697 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
10698 IORING_ENTER_REGISTERED_RING)))
2b188cc1
JA
10699 return -EINVAL;
10700
e7a6c00d
JA
10701 /*
10702 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
10703 * need only dereference our task private array to find it.
10704 */
10705 if (flags & IORING_ENTER_REGISTERED_RING) {
10706 struct io_uring_task *tctx = current->io_uring;
10707
10708 if (!tctx || fd >= IO_RINGFD_REG_MAX)
10709 return -EINVAL;
10710 fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
10711 f.file = tctx->registered_rings[fd];
10712 if (unlikely(!f.file))
10713 return -EBADF;
10714 } else {
10715 f = fdget(fd);
10716 if (unlikely(!f.file))
10717 return -EBADF;
10718 }
2b188cc1
JA
10719
10720 ret = -EOPNOTSUPP;
33f993da 10721 if (unlikely(f.file->f_op != &io_uring_fops))
2b188cc1
JA
10722 goto out_fput;
10723
10724 ret = -ENXIO;
10725 ctx = f.file->private_data;
33f993da 10726 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
2b188cc1
JA
10727 goto out_fput;
10728
7e84e1c7 10729 ret = -EBADFD;
33f993da 10730 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
7e84e1c7
SG
10731 goto out;
10732
6c271ce2
JA
10733 /*
10734 * For SQ polling, the thread will do all submissions and completions.
10735 * Just return the requested submit count, and wake the thread if
10736 * we were asked to.
10737 */
b2a9eada 10738 ret = 0;
6c271ce2 10739 if (ctx->flags & IORING_SETUP_SQPOLL) {
90f67366 10740 io_cqring_overflow_flush(ctx);
89448c47 10741
21f96522
JA
10742 if (unlikely(ctx->sq_data->thread == NULL)) {
10743 ret = -EOWNERDEAD;
04147488 10744 goto out;
21f96522 10745 }
6c271ce2 10746 if (flags & IORING_ENTER_SQ_WAKEUP)
534ca6d6 10747 wake_up(&ctx->sq_data->wait);
d9d05217
PB
10748 if (flags & IORING_ENTER_SQ_WAIT) {
10749 ret = io_sqpoll_wait_sq(ctx);
10750 if (ret)
10751 goto out;
10752 }
6c271ce2 10753 submitted = to_submit;
b2a9eada 10754 } else if (to_submit) {
eef51daa 10755 ret = io_uring_add_tctx_node(ctx);
0f212204
JA
10756 if (unlikely(ret))
10757 goto out;
d487b43c 10758
2b188cc1 10759 mutex_lock(&ctx->uring_lock);
0f212204 10760 submitted = io_submit_sqes(ctx, to_submit);
d487b43c
PB
10761 if (submitted != to_submit) {
10762 mutex_unlock(&ctx->uring_lock);
7c504e65 10763 goto out;
d487b43c
PB
10764 }
10765 if ((flags & IORING_ENTER_GETEVENTS) && ctx->syscall_iopoll)
10766 goto iopoll_locked;
10767 mutex_unlock(&ctx->uring_lock);
2b188cc1
JA
10768 }
10769 if (flags & IORING_ENTER_GETEVENTS) {
773697b6 10770 if (ctx->syscall_iopoll) {
d487b43c
PB
10771 /*
10772 * We disallow the app entering submit/complete with
10773 * polling, but we still need to lock the ring to
10774 * prevent racing with polled issue that got punted to
10775 * a workqueue.
10776 */
10777 mutex_lock(&ctx->uring_lock);
10778iopoll_locked:
f81440d3 10779 ret = io_validate_ext_arg(flags, argp, argsz);
d487b43c
PB
10780 if (likely(!ret)) {
10781 min_complete = min(min_complete, ctx->cq_entries);
10782 ret = io_iopoll_check(ctx, min_complete);
10783 }
10784 mutex_unlock(&ctx->uring_lock);
def596e9 10785 } else {
f81440d3
PB
10786 const sigset_t __user *sig;
10787 struct __kernel_timespec __user *ts;
10788
10789 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
10790 if (unlikely(ret))
10791 goto out;
d487b43c 10792 min_complete = min(min_complete, ctx->cq_entries);
c73ebb68 10793 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
def596e9 10794 }
2b188cc1
JA
10795 }
10796
7c504e65 10797out:
6805b32e 10798 percpu_ref_put(&ctx->refs);
2b188cc1 10799out_fput:
e7a6c00d
JA
10800 if (!(flags & IORING_ENTER_REGISTERED_RING))
10801 fdput(f);
2b188cc1
JA
10802 return submitted ? submitted : ret;
10803}
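
The SQPOLL branch above only wakes the poller thread when asked; the matching userspace step, sketched below, is to test IORING_SQ_NEED_WAKEUP after publishing a new SQ tail. maybe_wake_sqpoll() is an illustrative name, sq_flags is assumed to point at the mapped word at sq_off.flags, and the acquire load uses a GCC/Clang builtin.

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Call after bumping the SQ tail on an IORING_SETUP_SQPOLL ring. */
static int maybe_wake_sqpoll(int ring_fd, const unsigned *sq_flags)
{
	/* Load the flags only after the new tail has been published. */
	unsigned flags = __atomic_load_n(sq_flags, __ATOMIC_ACQUIRE);

	if (flags & IORING_SQ_NEED_WAKEUP)
		return syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			       IORING_ENTER_SQ_WAKEUP, NULL, 0);
	return 0;	/* sq thread is still awake, no syscall needed */
}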
10804
bebdb65e 10805#ifdef CONFIG_PROC_FS
c072481d 10806static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
61cf9370 10807 const struct cred *cred)
87ce955b 10808{
87ce955b
JA
10809 struct user_namespace *uns = seq_user_ns(m);
10810 struct group_info *gi;
10811 kernel_cap_t cap;
10812 unsigned __capi;
10813 int g;
10814
10815 seq_printf(m, "%5d\n", id);
10816 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
10817 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
10818 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
10819 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
10820 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
10821 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
10822 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
10823 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
10824 seq_puts(m, "\n\tGroups:\t");
10825 gi = cred->group_info;
10826 for (g = 0; g < gi->ngroups; g++) {
10827 seq_put_decimal_ull(m, g ? " " : "",
10828 from_kgid_munged(uns, gi->gid[g]));
10829 }
10830 seq_puts(m, "\n\tCapEff:\t");
10831 cap = cred->cap_effective;
10832 CAP_FOR_EACH_U32(__capi)
10833 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
10834 seq_putc(m, '\n');
10835 return 0;
10836}
10837
c072481d
PB
10838static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
10839 struct seq_file *m)
87ce955b 10840{
dbbe9c64 10841 struct io_sq_data *sq = NULL;
83f84356
HX
10842 struct io_overflow_cqe *ocqe;
10843 struct io_rings *r = ctx->rings;
10844 unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
83f84356
HX
10845 unsigned int sq_head = READ_ONCE(r->sq.head);
10846 unsigned int sq_tail = READ_ONCE(r->sq.tail);
10847 unsigned int cq_head = READ_ONCE(r->cq.head);
10848 unsigned int cq_tail = READ_ONCE(r->cq.tail);
f75d1183 10849 unsigned int sq_entries, cq_entries;
fad8e0de 10850 bool has_lock;
83f84356
HX
10851 unsigned int i;
10852
10853 /*
10854 * We may get imprecise sqe and cqe info if the ring is actively running,
10855 * since we read cached_sq_head and cached_cq_tail without uring_lock,
10856 * and sq_tail and cq_head are changed by userspace. That's fine, since
10857 * this info is usually only consulted when the ring is stuck.
10858 */
c0235652 10859 seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
f75d1183
JA
10860 seq_printf(m, "SqHead:\t%u\n", sq_head);
10861 seq_printf(m, "SqTail:\t%u\n", sq_tail);
10862 seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
10863 seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
10864 seq_printf(m, "CqHead:\t%u\n", cq_head);
10865 seq_printf(m, "CqTail:\t%u\n", cq_tail);
10866 seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
10867 seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head);
10868 sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
10869 for (i = 0; i < sq_entries; i++) {
10870 unsigned int entry = i + sq_head;
10871 unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
a1957780 10872 struct io_uring_sqe *sqe;
f75d1183
JA
10873
10874 if (sq_idx > sq_mask)
10875 continue;
10876 sqe = &ctx->sq_sqes[sq_idx];
10877 seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n",
10878 sq_idx, sqe->opcode, sqe->fd, sqe->flags,
10879 sqe->user_data);
83f84356 10880 }
f75d1183
JA
10881 seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
10882 cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
10883 for (i = 0; i < cq_entries; i++) {
10884 unsigned int entry = i + cq_head;
10885 struct io_uring_cqe *cqe = &r->cqes[entry & cq_mask];
83f84356
HX
10886
10887 seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n",
f75d1183
JA
10888 entry & cq_mask, cqe->user_data, cqe->res,
10889 cqe->flags);
83f84356 10890 }
87ce955b 10891
fad8e0de
JA
10892 /*
10893 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
10894 * since fdinfo case grabs it in the opposite direction of normal use
10895 * cases. If we fail to get the lock, we just don't iterate any
10896 * structures that could be going away outside the io_uring mutex.
10897 */
10898 has_lock = mutex_trylock(&ctx->uring_lock);
10899
5f3f26f9 10900 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
dbbe9c64 10901 sq = ctx->sq_data;
5f3f26f9
JA
10902 if (!sq->thread)
10903 sq = NULL;
10904 }
dbbe9c64
JQ
10905
10906 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
10907 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
87ce955b 10908 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
fad8e0de 10909 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
7b29f92d 10910 struct file *f = io_file_from_index(ctx, i);
87ce955b 10911
87ce955b
JA
10912 if (f)
10913 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
10914 else
10915 seq_printf(m, "%5u: <none>\n", i);
10916 }
10917 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
fad8e0de 10918 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
41edf1a5 10919 struct io_mapped_ubuf *buf = ctx->user_bufs[i];
4751f53d 10920 unsigned int len = buf->ubuf_end - buf->ubuf;
87ce955b 10921
4751f53d 10922 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
87ce955b 10923 }
61cf9370
MWO
10924 if (has_lock && !xa_empty(&ctx->personalities)) {
10925 unsigned long index;
10926 const struct cred *cred;
10927
87ce955b 10928 seq_printf(m, "Personalities:\n");
61cf9370
MWO
10929 xa_for_each(&ctx->personalities, index, cred)
10930 io_uring_show_cred(m, index, cred);
87ce955b 10931 }
83f84356
HX
10932 if (has_lock)
10933 mutex_unlock(&ctx->uring_lock);
10934
10935 seq_puts(m, "PollList:\n");
79ebeaee 10936 spin_lock(&ctx->completion_lock);
d7718a9d
JA
10937 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
10938 struct hlist_head *list = &ctx->cancel_hash[i];
10939 struct io_kiocb *req;
10940
10941 hlist_for_each_entry(req, list, hash_node)
10942 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
7f62d40d 10943 task_work_pending(req->task));
d7718a9d 10944 }
83f84356
HX
10945
10946 seq_puts(m, "CqOverflowList:\n");
10947 list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
10948 struct io_uring_cqe *cqe = &ocqe->cqe;
10949
10950 seq_printf(m, " user_data=%llu, res=%d, flags=%x\n",
10951 cqe->user_data, cqe->res, cqe->flags);
10952
10953 }
10954
79ebeaee 10955 spin_unlock(&ctx->completion_lock);
87ce955b
JA
10956}
10957
c072481d 10958static __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
87ce955b
JA
10959{
10960 struct io_ring_ctx *ctx = f->private_data;
10961
10962 if (percpu_ref_tryget(&ctx->refs)) {
10963 __io_uring_show_fdinfo(ctx, m);
10964 percpu_ref_put(&ctx->refs);
10965 }
10966}
bebdb65e 10967#endif
87ce955b 10968
2b188cc1
JA
10969static const struct file_operations io_uring_fops = {
10970 .release = io_uring_release,
10971 .mmap = io_uring_mmap,
6c5c240e
RP
10972#ifndef CONFIG_MMU
10973 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
10974 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
10975#endif
2b188cc1 10976 .poll = io_uring_poll,
bebdb65e 10977#ifdef CONFIG_PROC_FS
87ce955b 10978 .show_fdinfo = io_uring_show_fdinfo,
bebdb65e 10979#endif
2b188cc1
JA
10980};
10981
c072481d
PB
10982static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
10983 struct io_uring_params *p)
2b188cc1 10984{
75b28aff
HV
10985 struct io_rings *rings;
10986 size_t size, sq_array_offset;
2b188cc1 10987
bd740481
JA
10988 /* make sure these are sane, as we already accounted them */
10989 ctx->sq_entries = p->sq_entries;
10990 ctx->cq_entries = p->cq_entries;
10991
75b28aff
HV
10992 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
10993 if (size == SIZE_MAX)
10994 return -EOVERFLOW;
10995
10996 rings = io_mem_alloc(size);
10997 if (!rings)
2b188cc1
JA
10998 return -ENOMEM;
10999
75b28aff
HV
11000 ctx->rings = rings;
11001 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
11002 rings->sq_ring_mask = p->sq_entries - 1;
11003 rings->cq_ring_mask = p->cq_entries - 1;
11004 rings->sq_ring_entries = p->sq_entries;
11005 rings->cq_ring_entries = p->cq_entries;
2b188cc1
JA
11006
11007 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
eb065d30
JA
11008 if (size == SIZE_MAX) {
11009 io_mem_free(ctx->rings);
11010 ctx->rings = NULL;
2b188cc1 11011 return -EOVERFLOW;
eb065d30 11012 }
2b188cc1
JA
11013
11014 ctx->sq_sqes = io_mem_alloc(size);
eb065d30
JA
11015 if (!ctx->sq_sqes) {
11016 io_mem_free(ctx->rings);
11017 ctx->rings = NULL;
2b188cc1 11018 return -ENOMEM;
eb065d30 11019 }
2b188cc1 11020
2b188cc1
JA
11021 return 0;
11022}
11023
9faadcc8
PB
11024static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
11025{
11026 int ret, fd;
11027
11028 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
11029 if (fd < 0)
11030 return fd;
11031
eef51daa 11032 ret = io_uring_add_tctx_node(ctx);
9faadcc8
PB
11033 if (ret) {
11034 put_unused_fd(fd);
11035 return ret;
11036 }
11037 fd_install(fd, file);
11038 return fd;
11039}
11040
2b188cc1
JA
11041/*
11042 * Allocate an anonymous fd, this is what constitutes the application
11043 * visible backing of an io_uring instance. The application mmaps this
11044 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
11045 * we have to tie this fd to a socket for file garbage collection purposes.
11046 */
9faadcc8 11047static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
2b188cc1
JA
11048{
11049 struct file *file;
9faadcc8 11050#if defined(CONFIG_UNIX)
2b188cc1
JA
11051 int ret;
11052
2b188cc1
JA
11053 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
11054 &ctx->ring_sock);
11055 if (ret)
9faadcc8 11056 return ERR_PTR(ret);
2b188cc1
JA
11057#endif
11058
91a9ab7c
PM
11059 file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
11060 O_RDWR | O_CLOEXEC, NULL);
2b188cc1 11061#if defined(CONFIG_UNIX)
9faadcc8
PB
11062 if (IS_ERR(file)) {
11063 sock_release(ctx->ring_sock);
11064 ctx->ring_sock = NULL;
11065 } else {
11066 ctx->ring_sock->file = file;
0f212204 11067 }
2b188cc1 11068#endif
9faadcc8 11069 return file;
2b188cc1
JA
11070}
11071
c072481d
PB
11072static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
11073 struct io_uring_params __user *params)
2b188cc1 11074{
2b188cc1 11075 struct io_ring_ctx *ctx;
9faadcc8 11076 struct file *file;
2b188cc1
JA
11077 int ret;
11078
8110c1a6 11079 if (!entries)
2b188cc1 11080 return -EINVAL;
8110c1a6
JA
11081 if (entries > IORING_MAX_ENTRIES) {
11082 if (!(p->flags & IORING_SETUP_CLAMP))
11083 return -EINVAL;
11084 entries = IORING_MAX_ENTRIES;
11085 }
2b188cc1
JA
11086
11087 /*
11088 * Use twice as many entries for the CQ ring. It's possible for the
11089 * application to drive a higher depth than the size of the SQ ring,
11090 * since the sqes are only used at submission time. This allows for
33a107f0
JA
11091 * some flexibility in overcommitting a bit. If the application has
11092 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
11093 * of CQ ring entries manually.
2b188cc1
JA
11094 */
11095 p->sq_entries = roundup_pow_of_two(entries);
33a107f0
JA
11096 if (p->flags & IORING_SETUP_CQSIZE) {
11097 /*
11098 * If IORING_SETUP_CQSIZE is set, we do the same roundup
11099 * to a power-of-two, if it isn't already. We do NOT impose
11100 * any cq vs sq ring sizing.
11101 */
eb2667b3 11102 if (!p->cq_entries)
33a107f0 11103 return -EINVAL;
8110c1a6
JA
11104 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
11105 if (!(p->flags & IORING_SETUP_CLAMP))
11106 return -EINVAL;
11107 p->cq_entries = IORING_MAX_CQ_ENTRIES;
11108 }
eb2667b3
JQ
11109 p->cq_entries = roundup_pow_of_two(p->cq_entries);
11110 if (p->cq_entries < p->sq_entries)
11111 return -EINVAL;
33a107f0
JA
11112 } else {
11113 p->cq_entries = 2 * p->sq_entries;
11114 }
2b188cc1 11115
2b188cc1 11116 ctx = io_ring_ctx_alloc(p);
62e398be 11117 if (!ctx)
2b188cc1 11118 return -ENOMEM;
773697b6
PB
11119
11120 /*
11121 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
11122 * space applications don't need to do io completion events
11123 * polling again, they can rely on io_sq_thread to do polling
11124 * work, which can reduce cpu usage and uring_lock contention.
11125 */
11126 if (ctx->flags & IORING_SETUP_IOPOLL &&
11127 !(ctx->flags & IORING_SETUP_SQPOLL))
11128 ctx->syscall_iopoll = 1;
11129
2b188cc1 11130 ctx->compat = in_compat_syscall();
62e398be
JA
11131 if (!capable(CAP_IPC_LOCK))
11132 ctx->user = get_uid(current_user());
2aede0e4
JA
11133
11134 /*
11135 * This is just grabbed for accounting purposes. When a process exits,
11136 * the mm is exited and dropped before the files, hence we need to hang
11137 * on to this mm purely for the purposes of being able to unaccount
11138 * memory (locked/pinned vm). It's not used for anything else.
11139 */
6b7898eb 11140 mmgrab(current->mm);
2aede0e4 11141 ctx->mm_account = current->mm;
6b7898eb 11142
2b188cc1
JA
11143 ret = io_allocate_scq_urings(ctx, p);
11144 if (ret)
11145 goto err;
11146
7e84e1c7 11147 ret = io_sq_offload_create(ctx, p);
2b188cc1
JA
11148 if (ret)
11149 goto err;
eae071c9 11150 /* always set a rsrc node */
47b228ce
PB
11151 ret = io_rsrc_node_switch_start(ctx);
11152 if (ret)
11153 goto err;
eae071c9 11154 io_rsrc_node_switch(ctx, NULL);
2b188cc1 11155
2b188cc1 11156 memset(&p->sq_off, 0, sizeof(p->sq_off));
75b28aff
HV
11157 p->sq_off.head = offsetof(struct io_rings, sq.head);
11158 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
11159 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
11160 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
11161 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
11162 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
11163 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
2b188cc1
JA
11164
11165 memset(&p->cq_off, 0, sizeof(p->cq_off));
75b28aff
HV
11166 p->cq_off.head = offsetof(struct io_rings, cq.head);
11167 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
11168 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
11169 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
11170 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
11171 p->cq_off.cqes = offsetof(struct io_rings, cqes);
0d9b5b3a 11172 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
ac90f249 11173
7f13657d
XW
11174 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
11175 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
5769a351 11176 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
c73ebb68 11177 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
9690557e 11178 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
c4212f3e
JA
11179 IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
11180 IORING_FEAT_LINKED_FILE;
7f13657d
XW
11181
11182 if (copy_to_user(params, p, sizeof(*p))) {
11183 ret = -EFAULT;
11184 goto err;
11185 }
d1719f70 11186
9faadcc8
PB
11187 file = io_uring_get_file(ctx);
11188 if (IS_ERR(file)) {
11189 ret = PTR_ERR(file);
11190 goto err;
11191 }
11192
044c1ab3
JA
11193 /*
11194 * Install ring fd as the very last thing, so we don't risk someone
11195 * having closed it before we finish setup
11196 */
9faadcc8
PB
11197 ret = io_uring_install_fd(ctx, file);
11198 if (ret < 0) {
11199 /* fput will clean it up */
11200 fput(file);
11201 return ret;
11202 }
044c1ab3 11203
c826bd7a 11204 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
2b188cc1
JA
11205 return ret;
11206err:
11207 io_ring_ctx_wait_and_kill(ctx);
11208 return ret;
11209}
11210
11211/*
11212 * Sets up an io_uring context and returns the fd. The application asks for a
11213 * ring size; we return the actual sq/cq ring sizes (among other things) in the
11214 * params structure passed in.
11215 */
11216static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
11217{
11218 struct io_uring_params p;
2b188cc1
JA
11219 int i;
11220
11221 if (copy_from_user(&p, params, sizeof(p)))
11222 return -EFAULT;
11223 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
11224 if (p.resv[i])
11225 return -EINVAL;
11226 }
11227
6c271ce2 11228 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
8110c1a6 11229 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
7e84e1c7 11230 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
bcbb7bf6 11231 IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL))
2b188cc1
JA
11232 return -EINVAL;
11233
7f13657d 11234 return io_uring_create(entries, &p, params);
2b188cc1
JA
11235}
11236
11237SYSCALL_DEFINE2(io_uring_setup, u32, entries,
11238 struct io_uring_params __user *, params)
11239{
11240 return io_uring_setup(entries, params);
11241}
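
A minimal userspace sketch of this setup path, asking for a larger CQ ring with IORING_SETUP_CQSIZE; the helper name and entry counts are illustrative.

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int setup_ring_with_big_cq(void)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));	/* resv[] must be zero */
	p.flags = IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP;
	p.cq_entries = 4096;		/* rounded up to a power of two */

	/* Returns the ring fd; p is written back with the real sizes/offsets. */
	return syscall(__NR_io_uring_setup, 128, &p);
}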
11242
c072481d
PB
11243static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
11244 unsigned nr_args)
66f4af93
JA
11245{
11246 struct io_uring_probe *p;
11247 size_t size;
11248 int i, ret;
11249
11250 size = struct_size(p, ops, nr_args);
11251 if (size == SIZE_MAX)
11252 return -EOVERFLOW;
11253 p = kzalloc(size, GFP_KERNEL);
11254 if (!p)
11255 return -ENOMEM;
11256
11257 ret = -EFAULT;
11258 if (copy_from_user(p, arg, size))
11259 goto out;
11260 ret = -EINVAL;
11261 if (memchr_inv(p, 0, size))
11262 goto out;
11263
11264 p->last_op = IORING_OP_LAST - 1;
11265 if (nr_args > IORING_OP_LAST)
11266 nr_args = IORING_OP_LAST;
11267
11268 for (i = 0; i < nr_args; i++) {
11269 p->ops[i].op = i;
11270 if (!io_op_defs[i].not_supported)
11271 p->ops[i].flags = IO_URING_OP_SUPPORTED;
11272 }
11273 p->ops_len = i;
11274
11275 ret = 0;
11276 if (copy_to_user(arg, p, size))
11277 ret = -EFAULT;
11278out:
11279 kfree(p);
11280 return ret;
11281}
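
A userspace sketch of driving IORING_REGISTER_PROBE: opcode_supported() is an illustrative name, and the buffer must be fully zeroed because io_probe() above rejects anything else via memchr_inv().

#include <linux/io_uring.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Returns 1 if `op` is supported by the running kernel, 0 if not, -1 on error. */
static int opcode_supported(int ring_fd, int op)
{
	size_t len = sizeof(struct io_uring_probe) +
		     256 * sizeof(struct io_uring_probe_op);
	struct io_uring_probe *p = calloc(1, len);	/* must be all zeroes */
	int ret = -1;

	if (!p)
		return -1;
	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
		    p, 256) >= 0)
		ret = (op <= p->last_op &&
		       (p->ops[op].flags & IO_URING_OP_SUPPORTED)) ? 1 : 0;
	free(p);
	return ret;
}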
11282
071698e1
JA
11283static int io_register_personality(struct io_ring_ctx *ctx)
11284{
4379bf8b 11285 const struct cred *creds;
61cf9370 11286 u32 id;
1e6fa521 11287 int ret;
071698e1 11288
4379bf8b 11289 creds = get_current_cred();
1e6fa521 11290
61cf9370
MWO
11291 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
11292 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
a30f895a
JA
11293 if (ret < 0) {
11294 put_cred(creds);
11295 return ret;
11296 }
11297 return id;
071698e1
JA
11298}
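
From userspace this op takes no argument at all; a sketch with an illustrative helper name:

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Snapshot the calling task's credentials; returns a personality id >= 0. */
static int register_personality(int ring_fd)
{
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_PERSONALITY, NULL, 0);
}

The returned id can later be stored in sqe->personality so an individual request is issued with those credentials rather than the submitter's current ones.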
11299
c072481d
PB
11300static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
11301 void __user *arg, unsigned int nr_args)
21b55dbc
SG
11302{
11303 struct io_uring_restriction *res;
11304 size_t size;
11305 int i, ret;
11306
7e84e1c7
SG
11307 /* Restrictions allowed only if rings started disabled */
11308 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
11309 return -EBADFD;
11310
21b55dbc 11311 /* We allow only a single restrictions registration */
7e84e1c7 11312 if (ctx->restrictions.registered)
21b55dbc
SG
11313 return -EBUSY;
11314
11315 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
11316 return -EINVAL;
11317
11318 size = array_size(nr_args, sizeof(*res));
11319 if (size == SIZE_MAX)
11320 return -EOVERFLOW;
11321
11322 res = memdup_user(arg, size);
11323 if (IS_ERR(res))
11324 return PTR_ERR(res);
11325
11326 ret = 0;
11327
11328 for (i = 0; i < nr_args; i++) {
11329 switch (res[i].opcode) {
11330 case IORING_RESTRICTION_REGISTER_OP:
11331 if (res[i].register_op >= IORING_REGISTER_LAST) {
11332 ret = -EINVAL;
11333 goto out;
11334 }
11335
11336 __set_bit(res[i].register_op,
11337 ctx->restrictions.register_op);
11338 break;
11339 case IORING_RESTRICTION_SQE_OP:
11340 if (res[i].sqe_op >= IORING_OP_LAST) {
11341 ret = -EINVAL;
11342 goto out;
11343 }
11344
11345 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
11346 break;
11347 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
11348 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
11349 break;
11350 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
11351 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
11352 break;
11353 default:
11354 ret = -EINVAL;
11355 goto out;
11356 }
11357 }
11358
11359out:
11360 /* Reset all restrictions if an error happened */
11361 if (ret != 0)
11362 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
11363 else
7e84e1c7 11364 ctx->restrictions.registered = true;
21b55dbc
SG
11365
11366 kfree(res);
11367 return ret;
11368}
11369
7e84e1c7
SG
11370static int io_register_enable_rings(struct io_ring_ctx *ctx)
11371{
11372 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
11373 return -EBADFD;
11374
11375 if (ctx->restrictions.registered)
11376 ctx->restricted = 1;
11377
0298ef96
PB
11378 ctx->flags &= ~IORING_SETUP_R_DISABLED;
11379 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
11380 wake_up(&ctx->sq_data->wait);
7e84e1c7
SG
11381 return 0;
11382}
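
Tying the two ops together, a userspace sketch of the intended flow: the ring is created with IORING_SETUP_R_DISABLED, restricted to a couple of SQE opcodes, then enabled. restrict_and_enable() and the chosen opcodes are illustrative.

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* ring_fd must have been created with IORING_SETUP_R_DISABLED. */
static int restrict_and_enable(int ring_fd)
{
	struct io_uring_restriction res[2];
	int ret;

	memset(res, 0, sizeof(res));
	res[0].opcode = IORING_RESTRICTION_SQE_OP;
	res[0].sqe_op = IORING_OP_READV;
	res[1].opcode = IORING_RESTRICTION_SQE_OP;
	res[1].sqe_op = IORING_OP_WRITEV;

	/* Only one restrictions registration is allowed, and only while disabled. */
	ret = syscall(__NR_io_uring_register, ring_fd,
		      IORING_REGISTER_RESTRICTIONS, res, 2);
	if (ret < 0)
		return ret;

	/* Lift IORING_SETUP_R_DISABLED; arg must be NULL and nr_args 0. */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_ENABLE_RINGS, NULL, 0);
}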
11383
fdecb662 11384static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
c3bdad02 11385 struct io_uring_rsrc_update2 *up,
98f0b3b4
PB
11386 unsigned nr_args)
11387{
11388 __u32 tmp;
11389 int err;
11390
11391 if (check_add_overflow(up->offset, nr_args, &tmp))
11392 return -EOVERFLOW;
11393 err = io_rsrc_node_switch_start(ctx);
11394 if (err)
11395 return err;
11396
fdecb662
PB
11397 switch (type) {
11398 case IORING_RSRC_FILE:
98f0b3b4 11399 return __io_sqe_files_update(ctx, up, nr_args);
634d00df
PB
11400 case IORING_RSRC_BUFFER:
11401 return __io_sqe_buffers_update(ctx, up, nr_args);
98f0b3b4
PB
11402 }
11403 return -EINVAL;
11404}
11405
c3bdad02
PB
11406static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
11407 unsigned nr_args)
98f0b3b4 11408{
c3bdad02 11409 struct io_uring_rsrc_update2 up;
98f0b3b4
PB
11410
11411 if (!nr_args)
11412 return -EINVAL;
c3bdad02
PB
11413 memset(&up, 0, sizeof(up));
11414 if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
11415 return -EFAULT;
d8a3ba9c 11416 if (up.resv || up.resv2)
565c5e61 11417 return -EINVAL;
c3bdad02
PB
11418 return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
11419}
11420
11421static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
992da01a 11422 unsigned size, unsigned type)
c3bdad02
PB
11423{
11424 struct io_uring_rsrc_update2 up;
11425
11426 if (size != sizeof(up))
11427 return -EINVAL;
98f0b3b4
PB
11428 if (copy_from_user(&up, arg, sizeof(up)))
11429 return -EFAULT;
d8a3ba9c 11430 if (!up.nr || up.resv || up.resv2)
98f0b3b4 11431 return -EINVAL;
992da01a 11432 return __io_register_rsrc_update(ctx, type, &up, up.nr);
98f0b3b4
PB
11433}
11434
c072481d 11435static __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
992da01a 11436 unsigned int size, unsigned int type)
792e3582
PB
11437{
11438 struct io_uring_rsrc_register rr;
11439
11440 /* keep it extendible */
11441 if (size != sizeof(rr))
11442 return -EINVAL;
11443
11444 memset(&rr, 0, sizeof(rr));
11445 if (copy_from_user(&rr, arg, size))
11446 return -EFAULT;
992da01a 11447 if (!rr.nr || rr.resv || rr.resv2)
792e3582
PB
11448 return -EINVAL;
11449
992da01a 11450 switch (type) {
792e3582
PB
11451 case IORING_RSRC_FILE:
11452 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
11453 rr.nr, u64_to_user_ptr(rr.tags));
634d00df
PB
11454 case IORING_RSRC_BUFFER:
11455 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
11456 rr.nr, u64_to_user_ptr(rr.tags));
792e3582
PB
11457 }
11458 return -EINVAL;
11459}
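
A userspace sketch of the v2 registration format parsed here, registering a fixed-file table with per-file tags. register_files2() is an illustrative name, and the fds/tags arrays are caller-provided.

#include <linux/io_uring.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int register_files2(int ring_fd, const int *fds, const __u64 *tags,
			   unsigned nr)
{
	struct io_uring_rsrc_register rr;

	memset(&rr, 0, sizeof(rr));		/* reserved fields must be zero */
	rr.nr = nr;
	rr.data = (__u64)(uintptr_t)fds;	/* nr fds; -1 leaves a sparse slot */
	rr.tags = (__u64)(uintptr_t)tags;	/* tag 0 means "untagged" */

	/* nr_args carries sizeof(rr) so the struct can stay extendable. */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES2, &rr, sizeof(rr));
}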
11460
c072481d
PB
11461static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
11462 void __user *arg, unsigned len)
fe76421d
JA
11463{
11464 struct io_uring_task *tctx = current->io_uring;
11465 cpumask_var_t new_mask;
11466 int ret;
11467
11468 if (!tctx || !tctx->io_wq)
11469 return -EINVAL;
11470
11471 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
11472 return -ENOMEM;
11473
11474 cpumask_clear(new_mask);
11475 if (len > cpumask_size())
11476 len = cpumask_size();
11477
0f5e4b83
ES
11478 if (in_compat_syscall()) {
11479 ret = compat_get_bitmap(cpumask_bits(new_mask),
11480 (const compat_ulong_t __user *)arg,
11481 len * 8 /* CHAR_BIT */);
11482 } else {
11483 ret = copy_from_user(new_mask, arg, len);
11484 }
11485
11486 if (ret) {
fe76421d
JA
11487 free_cpumask_var(new_mask);
11488 return -EFAULT;
11489 }
11490
11491 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
11492 free_cpumask_var(new_mask);
11493 return ret;
11494}
11495
c072481d 11496static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
fe76421d
JA
11497{
11498 struct io_uring_task *tctx = current->io_uring;
11499
11500 if (!tctx || !tctx->io_wq)
11501 return -EINVAL;
11502
11503 return io_wq_cpu_affinity(tctx->io_wq, NULL);
11504}
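
A userspace sketch of the affinity registration above; pin_iowq_to_cpu0() is an illustrative name, and it assumes a plain cpu_set_t bitmap is an acceptable representation, with nr_args giving its length in bytes.

#define _GNU_SOURCE
#include <linux/io_uring.h>
#include <sched.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Restrict this ring's io-wq workers to CPU 0. */
static int pin_iowq_to_cpu0(int ring_fd)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);

	/* nr_args is the bitmap length in bytes; the kernel clamps it. */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_IOWQ_AFF, &mask, sizeof(mask));
}

IORING_UNREGISTER_IOWQ_AFF, handled just above, takes no argument and clears the restriction again.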
11505
c072481d
PB
11506static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
11507 void __user *arg)
b22fa62a 11508 __must_hold(&ctx->uring_lock)
2e480058 11509{
b22fa62a 11510 struct io_tctx_node *node;
fa84693b
JA
11511 struct io_uring_task *tctx = NULL;
11512 struct io_sq_data *sqd = NULL;
2e480058
JA
11513 __u32 new_count[2];
11514 int i, ret;
11515
2e480058
JA
11516 if (copy_from_user(new_count, arg, sizeof(new_count)))
11517 return -EFAULT;
11518 for (i = 0; i < ARRAY_SIZE(new_count); i++)
11519 if (new_count[i] > INT_MAX)
11520 return -EINVAL;
11521
fa84693b
JA
11522 if (ctx->flags & IORING_SETUP_SQPOLL) {
11523 sqd = ctx->sq_data;
11524 if (sqd) {
009ad9f0
JA
11525 /*
11526 * Observe the correct sqd->lock -> ctx->uring_lock
11527 * ordering. Fine to drop uring_lock here, we hold
11528 * a ref to the ctx.
11529 */
41d3a6bd 11530 refcount_inc(&sqd->refs);
009ad9f0 11531 mutex_unlock(&ctx->uring_lock);
fa84693b 11532 mutex_lock(&sqd->lock);
009ad9f0 11533 mutex_lock(&ctx->uring_lock);
41d3a6bd
JA
11534 if (sqd->thread)
11535 tctx = sqd->thread->io_uring;
fa84693b
JA
11536 }
11537 } else {
11538 tctx = current->io_uring;
11539 }
11540
e139a1ec 11541 BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
fa84693b 11542
bad119b9
PB
11543 for (i = 0; i < ARRAY_SIZE(new_count); i++)
11544 if (new_count[i])
11545 ctx->iowq_limits[i] = new_count[i];
e139a1ec
PB
11546 ctx->iowq_limits_set = true;
11547
e139a1ec
PB
11548 if (tctx && tctx->io_wq) {
11549 ret = io_wq_max_workers(tctx->io_wq, new_count);
11550 if (ret)
11551 goto err;
11552 } else {
11553 memset(new_count, 0, sizeof(new_count));
11554 }
fa84693b 11555
41d3a6bd 11556 if (sqd) {
fa84693b 11557 mutex_unlock(&sqd->lock);
41d3a6bd
JA
11558 io_put_sq_data(sqd);
11559 }
2e480058
JA
11560
11561 if (copy_to_user(arg, new_count, sizeof(new_count)))
11562 return -EFAULT;
11563
b22fa62a
PB
11564 /* that's it for SQPOLL, only the SQPOLL task creates requests */
11565 if (sqd)
11566 return 0;
11567
11568 /* now propagate the restriction to all registered users */
11569 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
11570 struct io_uring_task *tctx = node->task->io_uring;
11571
11572 if (WARN_ON_ONCE(!tctx->io_wq))
11573 continue;
11574
11575 for (i = 0; i < ARRAY_SIZE(new_count); i++)
11576 new_count[i] = ctx->iowq_limits[i];
11577 /* ignore errors, it always returns zero anyway */
11578 (void)io_wq_max_workers(tctx->io_wq, new_count);
11579 }
2e480058 11580 return 0;
fa84693b 11581err:
41d3a6bd 11582 if (sqd) {
fa84693b 11583 mutex_unlock(&sqd->lock);
41d3a6bd
JA
11584 io_put_sq_data(sqd);
11585 }
fa84693b 11586 return ret;
2e480058
JA
11587}
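
And the matching userspace call, sketched with an illustrative helper name over the raw syscall:

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int cap_iowq_workers(int ring_fd)
{
	/* [0] = bounded workers, [1] = unbounded; 0 leaves a limit unchanged. */
	__u32 counts[2] = { 8, 64 };

	/* nr_args must be 2; the previous limits are written back into counts[]. */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);
}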
11588
edafccee
JA
11589static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
11590 void __user *arg, unsigned nr_args)
b19062a5
JA
11591 __releases(ctx->uring_lock)
11592 __acquires(ctx->uring_lock)
edafccee
JA
11593{
11594 int ret;
11595
35fa71a0
JA
11596 /*
11597 * We're inside the ring mutex, if the ref is already dying, then
11598 * someone else killed the ctx or is already going through
11599 * io_uring_register().
11600 */
11601 if (percpu_ref_is_dying(&ctx->refs))
11602 return -ENXIO;
11603
75c4021a
PB
11604 if (ctx->restricted) {
11605 if (opcode >= IORING_REGISTER_LAST)
11606 return -EINVAL;
11607 opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
11608 if (!test_bit(opcode, ctx->restrictions.register_op))
11609 return -EACCES;
11610 }
11611
edafccee
JA
11612 switch (opcode) {
11613 case IORING_REGISTER_BUFFERS:
634d00df 11614 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
edafccee
JA
11615 break;
11616 case IORING_UNREGISTER_BUFFERS:
11617 ret = -EINVAL;
11618 if (arg || nr_args)
11619 break;
0a96bbe4 11620 ret = io_sqe_buffers_unregister(ctx);
edafccee 11621 break;
6b06314c 11622 case IORING_REGISTER_FILES:
792e3582 11623 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
6b06314c
JA
11624 break;
11625 case IORING_UNREGISTER_FILES:
11626 ret = -EINVAL;
11627 if (arg || nr_args)
11628 break;
11629 ret = io_sqe_files_unregister(ctx);
11630 break;
c3a31e60 11631 case IORING_REGISTER_FILES_UPDATE:
c3bdad02 11632 ret = io_register_files_update(ctx, arg, nr_args);
c3a31e60 11633 break;
9b402849
JA
11634 case IORING_REGISTER_EVENTFD:
11635 ret = -EINVAL;
11636 if (nr_args != 1)
11637 break;
c75312dd
UA
11638 ret = io_eventfd_register(ctx, arg, 0);
11639 break;
11640 case IORING_REGISTER_EVENTFD_ASYNC:
11641 ret = -EINVAL;
11642 if (nr_args != 1)
f2842ab5 11643 break;
c75312dd 11644 ret = io_eventfd_register(ctx, arg, 1);
9b402849
JA
11645 break;
11646 case IORING_UNREGISTER_EVENTFD:
11647 ret = -EINVAL;
11648 if (arg || nr_args)
11649 break;
11650 ret = io_eventfd_unregister(ctx);
11651 break;
66f4af93
JA
11652 case IORING_REGISTER_PROBE:
11653 ret = -EINVAL;
11654 if (!arg || nr_args > 256)
11655 break;
11656 ret = io_probe(ctx, arg, nr_args);
11657 break;
071698e1
JA
11658 case IORING_REGISTER_PERSONALITY:
11659 ret = -EINVAL;
11660 if (arg || nr_args)
11661 break;
11662 ret = io_register_personality(ctx);
11663 break;
11664 case IORING_UNREGISTER_PERSONALITY:
11665 ret = -EINVAL;
11666 if (arg)
11667 break;
11668 ret = io_unregister_personality(ctx, nr_args);
11669 break;
7e84e1c7
SG
11670 case IORING_REGISTER_ENABLE_RINGS:
11671 ret = -EINVAL;
11672 if (arg || nr_args)
11673 break;
11674 ret = io_register_enable_rings(ctx);
11675 break;
21b55dbc
SG
11676 case IORING_REGISTER_RESTRICTIONS:
11677 ret = io_register_restrictions(ctx, arg, nr_args);
11678 break;
992da01a
PB
11679 case IORING_REGISTER_FILES2:
11680 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
11681 break;
11682 case IORING_REGISTER_FILES_UPDATE2:
11683 ret = io_register_rsrc_update(ctx, arg, nr_args,
11684 IORING_RSRC_FILE);
11685 break;
11686 case IORING_REGISTER_BUFFERS2:
11687 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
792e3582 11688 break;
992da01a
PB
11689 case IORING_REGISTER_BUFFERS_UPDATE:
11690 ret = io_register_rsrc_update(ctx, arg, nr_args,
11691 IORING_RSRC_BUFFER);
c3bdad02 11692 break;
fe76421d
JA
11693 case IORING_REGISTER_IOWQ_AFF:
11694 ret = -EINVAL;
11695 if (!arg || !nr_args)
11696 break;
11697 ret = io_register_iowq_aff(ctx, arg, nr_args);
11698 break;
11699 case IORING_UNREGISTER_IOWQ_AFF:
11700 ret = -EINVAL;
11701 if (arg || nr_args)
11702 break;
11703 ret = io_unregister_iowq_aff(ctx);
11704 break;
2e480058
JA
11705 case IORING_REGISTER_IOWQ_MAX_WORKERS:
11706 ret = -EINVAL;
11707 if (!arg || nr_args != 2)
11708 break;
11709 ret = io_register_iowq_max_workers(ctx, arg);
11710 break;
e7a6c00d
JA
11711 case IORING_REGISTER_RING_FDS:
11712 ret = io_ringfd_register(ctx, arg, nr_args);
11713 break;
11714 case IORING_UNREGISTER_RING_FDS:
11715 ret = io_ringfd_unregister(ctx, arg, nr_args);
11716 break;
edafccee
JA
11717 default:
11718 ret = -EINVAL;
11719 break;
11720 }
11721
edafccee
JA
11722 return ret;
11723}
11724
11725SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
11726 void __user *, arg, unsigned int, nr_args)
11727{
11728 struct io_ring_ctx *ctx;
11729 long ret = -EBADF;
11730 struct fd f;
11731
11732 f = fdget(fd);
11733 if (!f.file)
11734 return -EBADF;
11735
11736 ret = -EOPNOTSUPP;
11737 if (f.file->f_op != &io_uring_fops)
11738 goto out_fput;
11739
11740 ctx = f.file->private_data;
11741
b6c23dd5
PB
11742 io_run_task_work();
11743
edafccee
JA
11744 mutex_lock(&ctx->uring_lock);
11745 ret = __io_uring_register(ctx, opcode, arg, nr_args);
11746 mutex_unlock(&ctx->uring_lock);
2757be22 11747 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
edafccee
JA
11748out_fput:
11749 fdput(f);
11750 return ret;
11751}
11752
2b188cc1
JA
11753static int __init io_uring_init(void)
11754{
d7f62e82
SM
11755#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
11756 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
11757 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
11758} while (0)
11759
11760#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
11761 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
11762 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
11763 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
11764 BUILD_BUG_SQE_ELEM(1, __u8, flags);
11765 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
11766 BUILD_BUG_SQE_ELEM(4, __s32, fd);
11767 BUILD_BUG_SQE_ELEM(8, __u64, off);
11768 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
11769 BUILD_BUG_SQE_ELEM(16, __u64, addr);
7d67af2c 11770 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
d7f62e82
SM
11771 BUILD_BUG_SQE_ELEM(24, __u32, len);
11772 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
11773 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
11774 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
11775 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
5769a351
JX
11776 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
11777 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
d7f62e82
SM
11778 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
11779 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
11780 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
11781 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
11782 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
11783 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
11784 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
11785 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
7d67af2c 11786 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
d7f62e82
SM
11787 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
11788 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
16340eab 11789 BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
d7f62e82 11790 BUILD_BUG_SQE_ELEM(42, __u16, personality);
7d67af2c 11791 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
b9445598 11792 BUILD_BUG_SQE_ELEM(44, __u32, file_index);
d7f62e82 11793
b0d658ec
PB
11794 BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
11795 sizeof(struct io_uring_rsrc_update));
11796 BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
11797 sizeof(struct io_uring_rsrc_update2));
90499ad0
PB
11798
11799 /* ->buf_index is u16 */
11800 BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
11801
b0d658ec
PB
11802 /* should fit into one byte */
11803 BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
68fe256a
PB
11804 BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
11805 BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
b0d658ec 11806
d3656344 11807 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
32c2d33e 11808 BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
16340eab 11809
91f245d5
JA
11810 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
11811 SLAB_ACCOUNT);
2b188cc1
JA
11812 return 0;
11813};
11814__initcall(io_uring_init);