// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
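/*
 * Illustrative sketch (not part of the kernel build): how a userspace
 * consumer might reap CQEs while honouring the pairing described above.
 * The cq_view layout is hypothetical; in practice the pointers would be
 * resolved from the IORING_OFF_CQ_RING mmap via struct io_cqring_offsets.
 */
#if 0
#include <linux/io_uring.h>	/* struct io_uring_cqe (uapi) */

struct cq_view {
	unsigned *head;			/* application-written */
	unsigned *tail;			/* kernel-written */
	unsigned mask;			/* cq_ring_entries - 1 */
	struct io_uring_cqe *cqes;
};

static unsigned reap_cqes(struct cq_view *cq)
{
	/* acquire pairs with the kernel's store-release of the CQ tail */
	unsigned tail = __atomic_load_n(cq->tail, __ATOMIC_ACQUIRE);
	unsigned head = *cq->head;
	unsigned seen = 0;

	while (head != tail) {
		struct io_uring_cqe *cqe = &cq->cqes[head & cq->mask];

		/* consume cqe->user_data and cqe->res here */
		(void)cqe;
		head++;
		seen++;
	}
	/* release orders the entry loads before publishing the new head */
	__atomic_store_n(cq->head, head, __ATOMIC_RELEASE);
	return seen;
}
#endif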
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <linux/xattr.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define IO_RSRC_TAG_TABLE_SHIFT	(PAGE_SHIFT - 3)
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)
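/*
 * Worked example (assuming 4K pages, i.e. PAGE_SHIFT == 12): the shift is 9,
 * so each second-level page of the tag table holds 1 << 9 == 512 u64 tags
 * (512 * 8 bytes == one page), and IO_RSRC_TAG_TABLE_MASK selects the slot
 * within that page.
 */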

#define IORING_MAX_REG_BUFFERS	(1U << 14)

#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
			  IOSQE_IO_HARDLINK | IOSQE_ASYNC)

#define SQE_VALID_FLAGS	(SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
			IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)

#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
			    REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
			    REQ_F_ASYNC_DATA)

#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
				 IO_REQ_CLEAN_FLAGS)

#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t		sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
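/*
 * Illustrative sketch (not part of the kernel build): the matching userspace
 * submission side, showing the mask-based indexing and the store-release of
 * the SQ tail described in the header comment.  The sq_view layout is
 * hypothetical; in practice the pointers come from the IORING_OFF_SQ_RING
 * and IORING_OFF_SQES mmaps via struct io_sqring_offsets.
 */
#if 0
#include <linux/io_uring.h>	/* struct io_uring_sqe, IORING_OP_NOP (uapi) */
#include <stdbool.h>
#include <string.h>

struct sq_view {
	unsigned *head;			/* kernel-written */
	unsigned *tail;			/* application-written */
	unsigned *array;		/* indices into sqes[] */
	unsigned mask;			/* sq_ring_entries - 1 */
	struct io_uring_sqe *sqes;
};

static bool queue_nop(struct sq_view *sq)
{
	unsigned tail = *sq->tail;
	/* acquire pairs with the kernel's store-release of the SQ head */
	unsigned head = __atomic_load_n(sq->head, __ATOMIC_ACQUIRE);
	struct io_uring_sqe *sqe;

	if (tail - head == sq->mask + 1)	/* ring is full */
		return false;

	sqe = &sq->sqes[tail & sq->mask];
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_NOP;

	sq->array[tail & sq->mask] = tail & sq->mask;
	/* release orders the sqe and array stores before the new tail */
	__atomic_store_n(sq->tail, tail + 1, __ATOMIC_RELEASE);
	return true;
}
#endif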

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[];
};

struct io_ring_ctx;

struct io_overflow_cqe {
	struct list_head list;
	struct io_uring_cqe cqe;
};

/*
 * FFS_SCM is only available on 64-bit archs, for 32-bit we just define it as 0
 * and define IO_URING_SCM_ALL. For this case, we use SCM for all files as we
 * can't safely always dereference the file when the task has exited and ring
 * cleanup is done. If a file is tracked and part of SCM, then unix gc on
 * process exit may reap it before __io_sqe_files_unregister() is run.
 */
#define FFS_NOWAIT		0x1UL
#define FFS_ISREG		0x2UL
#if defined(CONFIG_64BIT)
#define FFS_SCM			0x4UL
#else
#define IO_URING_SCM_ALL
#define FFS_SCM			0x0UL
#endif
#define FFS_MASK		~(FFS_NOWAIT|FFS_ISREG|FFS_SCM)

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};
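/*
 * Illustrative sketch (not compiled): how the FFS_* bits ride in the low,
 * alignment-guaranteed bits of the struct file pointer held in io_fixed_file,
 * and how pointer and flags are recovered.  The helper names are hypothetical.
 */
#if 0
static inline void ffs_example_pack(struct io_fixed_file *slot,
				    struct file *file, unsigned long flags)
{
	/* struct file allocations are aligned well past these low bits */
	slot->file_ptr = (unsigned long)file | flags;
}

static inline struct file *ffs_example_file(struct io_fixed_file *slot)
{
	return (struct file *)(slot->file_ptr & FFS_MASK);
}

static inline bool ffs_example_nowait(struct io_fixed_file *slot)
{
	return slot->file_ptr & FFS_NOWAIT;
}
#endif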

struct io_rsrc_put {
	struct list_head list;
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

struct io_file_table {
	struct io_fixed_file *files;
	unsigned long *bitmap;
	unsigned int alloc_hint;
};

struct io_rsrc_node {
	struct percpu_ref		refs;
	struct list_head		node;
	struct list_head		rsrc_list;
	struct io_rsrc_data		*rsrc_data;
	struct llist_node		llist;
	bool				done;
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx		*ctx;

	u64				**tags;
	unsigned int			nr;
	rsrc_put_fn			*do_put;
	atomic_t			refs;
	struct completion		done;
	bool				quiesce;
};

#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;
};
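/*
 * Illustrative sketch (not compiled): the union above is disambiguated by
 * ->buf_nr_pages, as the comment notes.  A hypothetical helper would simply
 * test that field:
 */
#if 0
static inline bool io_bl_example_is_ring_mapped(const struct io_buffer_list *bl)
{
	/* non-zero only when a ring of provided buffers was registered */
	return bl->buf_nr_pages != 0;
}
#endif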

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t		refs;
	atomic_t		park_pending;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
	int			sq_cpu;
	pid_t			task_pid;
	pid_t			task_tgid;

	unsigned long		state;
	struct completion	exited;
};

#define IO_COMPL_BATCH			32
#define IO_REQ_CACHE_SIZE		32
#define IO_REQ_ALLOC_BATCH		8

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node	free_list;
	/* batch completion logic */
	struct io_wq_work_list	compl_reqs;
	struct io_submit_link	link;

	bool			plug_started;
	bool			need_plug;
	bool			flush_cqes;
	unsigned short		submit_nr;
	struct blk_plug		plug;
};

struct io_ev_fd {
	struct eventfd_ctx	*cq_ev_fd;
	unsigned int		eventfd_async: 1;
	struct rcu_head		rcu;
};

#define BGID_ARRAY	64

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref	refs;

		struct io_rings		*rings;
		unsigned int		flags;
		enum task_work_notify_mode	notify_method;
		unsigned int		compat: 1;
		unsigned int		drain_next: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
		unsigned int		drain_disabled: 1;
		unsigned int		has_evfd: 1;
		unsigned int		syscall_iopoll: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		struct list_head	defer_list;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		int			rsrc_cached_refs;
		atomic_t		cancel_seq;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;

		struct io_buffer_list	*io_bl;
		struct xarray		io_bl_xa;
		struct list_head	io_buffers_cache;

		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		struct list_head	cq_overflow_list;
		struct list_head	apoll_cache;
		struct xarray		personalities;
		u32			pers_next;
		unsigned		sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct io_wq_work_list	locked_free_list;
	unsigned int		locked_free_nr;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned long		check_cq;

	struct {
		/*
		 * We cache a range of free CQEs we can use, once exhausted it
		 * should go through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe	*cqe_cached;
		struct io_uring_cqe	*cqe_sentinel;

		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct io_ev_fd	__rcu	*io_ev_fd;
		struct wait_queue_head	cq_wait;
		unsigned		cq_extra;
		atomic_t		cq_timeouts;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		spinlock_t		timeout_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct io_wq_work_list	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_queue;

		struct list_head	io_buffers_comp;
	} ____cacheline_aligned_in_smp;

	struct io_restriction		restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct {
		struct io_rsrc_node		*rsrc_backup_node;
		struct io_mapped_ubuf		*dummy_ubuf;
		struct io_rsrc_data		*file_data;
		struct io_rsrc_data		*buf_data;

		struct delayed_work		rsrc_put_work;
		struct llist_head		rsrc_put_llist;
		struct list_head		rsrc_ref_list;
		spinlock_t			rsrc_ref_lock;

		struct list_head	io_buffers_pages;
	};

	/* Keep this last, we don't need it for the fast path */
	struct {
	#if defined(CONFIG_UNIX)
		struct socket		*ring_sock;
	#endif
		/* hashed buffered write serialization */
		struct io_wq_hash	*hash_map;

		/* Only used for accounting purposes */
		struct user_struct	*user;
		struct mm_struct	*mm_account;

		/* ctx exit and cancelation */
		struct llist_head	fallback_llist;
		struct delayed_work	fallback_work;
		struct work_struct	exit_work;
		struct list_head	tctx_list;
		struct completion	ref_comp;
		u32			iowq_limits[2];
		bool			iowq_limits_set;
	};
};
530
e7a6c00d
JA
531/*
532 * Arbitrary limit, can be raised if need be
533 */
534#define IO_RINGFD_REG_MAX 16
535
53e043b2
SM
536struct io_uring_task {
537 /* submission side */
09899b19 538 int cached_refs;
53e043b2
SM
539 struct xarray xa;
540 struct wait_queue_head wait;
ee53fb2b
SM
541 const struct io_ring_ctx *last;
542 struct io_wq *io_wq;
53e043b2 543 struct percpu_counter inflight;
9cae36a0 544 atomic_t inflight_tracked;
53e043b2 545 atomic_t in_idle;
53e043b2
SM
546
547 spinlock_t task_lock;
548 struct io_wq_work_list task_list;
3fe07bcd 549 struct io_wq_work_list prio_task_list;
53e043b2 550 struct callback_head task_work;
e7a6c00d 551 struct file **registered_rings;
6294f368 552 bool task_running;
53e043b2
SM
553};
554
09bb8394
JA
555/*
556 * First field must be the file pointer in all the
557 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
558 */
221c5eb2
JA
559struct io_poll_iocb {
560 struct file *file;
018043be 561 struct wait_queue_head *head;
221c5eb2 562 __poll_t events;
392edb45 563 struct wait_queue_entry wait;
221c5eb2
JA
564};
565
9d805892 566struct io_poll_update {
018043be 567 struct file *file;
9d805892
PB
568 u64 old_user_data;
569 u64 new_user_data;
570 __poll_t events;
b69de288
JA
571 bool update_events;
572 bool update_user_data;
018043be
PB
573};
574
b5dba59e
JA
575struct io_close {
576 struct file *file;
b5dba59e 577 int fd;
7df778be 578 u32 file_slot;
a7c41b46 579 u32 flags;
b5dba59e
JA
580};
581
ad8a48ac
JA
582struct io_timeout_data {
583 struct io_kiocb *req;
584 struct hrtimer timer;
585 struct timespec64 ts;
586 enum hrtimer_mode mode;
50c1df2b 587 u32 flags;
ad8a48ac
JA
588};
589
8ed8d3c3
JA
590struct io_accept {
591 struct file *file;
592 struct sockaddr __user *addr;
593 int __user *addr_len;
594 int flags;
aaa4db12 595 u32 file_slot;
09952e3e 596 unsigned long nofile;
8ed8d3c3
JA
597};
598
1374e08e
JA
599struct io_socket {
600 struct file *file;
601 int domain;
602 int type;
603 int protocol;
604 int flags;
605 u32 file_slot;
606 unsigned long nofile;
607};
608
8ed8d3c3
JA
609struct io_sync {
610 struct file *file;
611 loff_t len;
612 loff_t off;
613 int flags;
d63d1b5e 614 int mode;
8ed8d3c3
JA
615};
616
fbf23849
JA
617struct io_cancel {
618 struct file *file;
619 u64 addr;
8e29da69 620 u32 flags;
4bf94615 621 s32 fd;
fbf23849
JA
622};
623
b29472ee
JA
624struct io_timeout {
625 struct file *file;
bfe68a22
PB
626 u32 off;
627 u32 target_seq;
135fcde8 628 struct list_head list;
90cd7e42
PB
629 /* head of the link, used by linked timeouts only */
630 struct io_kiocb *head;
89b263f6
JA
631 /* for linked completions */
632 struct io_kiocb *prev;
b29472ee
JA
633};
634
0bdf7a2d
PB
635struct io_timeout_rem {
636 struct file *file;
637 u64 addr;
9c8e11b3
PB
638
639 /* timeout update */
640 struct timespec64 ts;
641 u32 flags;
f1042b6c 642 bool ltimeout;
0bdf7a2d
PB
643};
644
9adbd45d
JA
645struct io_rw {
646 /* NOTE: kiocb has the file as the first member, so don't do it here */
647 struct kiocb kiocb;
648 u64 addr;
584b0180 649 u32 len;
20cbd21d 650 rwf_t flags;
9adbd45d
JA
651};
652
3fbb51c1
JA
653struct io_connect {
654 struct file *file;
655 struct sockaddr __user *addr;
656 int addr_len;
657};
658
e47293fd
JA
659struct io_sr_msg {
660 struct file *file;
fddaface 661 union {
4af3417a
PB
662 struct compat_msghdr __user *umsg_compat;
663 struct user_msghdr __user *umsg;
664 void __user *buf;
fddaface 665 };
e47293fd 666 int msg_flags;
fddaface 667 size_t len;
7ba89d2a 668 size_t done_io;
0455d4cc 669 unsigned int flags;
e47293fd
JA
670};
671
15b71abe
JA
672struct io_open {
673 struct file *file;
674 int dfd;
b9445598 675 u32 file_slot;
15b71abe 676 struct filename *filename;
c12cedf2 677 struct open_how how;
4022e7af 678 unsigned long nofile;
15b71abe
JA
679};
680
269bbe5f 681struct io_rsrc_update {
05f3fb3c
JA
682 struct file *file;
683 u64 arg;
684 u32 nr_args;
685 u32 offset;
686};
687
4840e418
JA
688struct io_fadvise {
689 struct file *file;
690 u64 offset;
691 u32 len;
692 u32 advice;
693};
694
c1ca757b
JA
695struct io_madvise {
696 struct file *file;
697 u64 addr;
698 u32 len;
699 u32 advice;
700};
701
3e4827b0
JA
702struct io_epoll {
703 struct file *file;
704 int epfd;
705 int op;
706 int fd;
707 struct epoll_event event;
e47293fd
JA
708};
709
7d67af2c
PB
710struct io_splice {
711 struct file *file_out;
7d67af2c
PB
712 loff_t off_out;
713 loff_t off_in;
714 u64 len;
a3e4bc23 715 int splice_fd_in;
7d67af2c
PB
716 unsigned int flags;
717};
718
ddf0322d
JA
719struct io_provide_buf {
720 struct file *file;
721 __u64 addr;
38134ada 722 __u32 len;
ddf0322d
JA
723 __u32 bgid;
724 __u16 nbufs;
725 __u16 bid;
726};
727
1d9e1288
BM
728struct io_statx {
729 struct file *file;
730 int dfd;
731 unsigned int mask;
732 unsigned int flags;
1b6fe6e0 733 struct filename *filename;
1d9e1288
BM
734 struct statx __user *buffer;
735};
736
36f4fa68
JA
737struct io_shutdown {
738 struct file *file;
739 int how;
740};
741
80a261fd
JA
742struct io_rename {
743 struct file *file;
744 int old_dfd;
745 int new_dfd;
746 struct filename *oldpath;
747 struct filename *newpath;
748 int flags;
749};
750
14a1143b
JA
751struct io_unlink {
752 struct file *file;
753 int dfd;
754 int flags;
755 struct filename *filename;
756};
757
e34a02dc
DK
758struct io_mkdir {
759 struct file *file;
760 int dfd;
761 umode_t mode;
762 struct filename *filename;
763};
764
7a8721f8
DK
765struct io_symlink {
766 struct file *file;
767 int new_dfd;
768 struct filename *oldpath;
769 struct filename *newpath;
770};
771
cf30da90
DK
772struct io_hardlink {
773 struct file *file;
774 int old_dfd;
775 int new_dfd;
776 struct filename *oldpath;
777 struct filename *newpath;
778 int flags;
779};
780
4f57f06c
JA
781struct io_msg {
782 struct file *file;
783 u64 user_data;
784 u32 len;
785};
786
2bb04df7
SR
787struct io_nop {
788 struct file *file;
789 u64 extra1;
790 u64 extra2;
791};
792
f499a021
JA
793struct io_async_connect {
794 struct sockaddr_storage address;
795};
796
03b1230c
JA
797struct io_async_msghdr {
798 struct iovec fast_iov[UIO_FASTIOV];
257e84a5
PB
799 /* points to an allocated iov, if NULL we use fast_iov instead */
800 struct iovec *free_iov;
03b1230c
JA
801 struct sockaddr __user *uaddr;
802 struct msghdr msg;
b537916c 803 struct sockaddr_storage addr;
03b1230c
JA
804};
805
538941e2 806struct io_rw_state {
ff6165b2 807 struct iov_iter iter;
cd658695 808 struct iov_iter_state iter_state;
c88598a9 809 struct iovec fast_iov[UIO_FASTIOV];
538941e2
PB
810};
811
812struct io_async_rw {
813 struct io_rw_state s;
814 const struct iovec *free_iovec;
227c0c96 815 size_t bytes_done;
bcf5a063 816 struct wait_page_queue wpq;
f67676d1
JA
817};
818
e9621e2b
SR
819struct io_xattr {
820 struct file *file;
821 struct xattr_ctx ctx;
822 struct filename *filename;
823};
824
6b47ee6e
PB
825enum {
826 REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
827 REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
828 REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
829 REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
830 REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
bcda7baa 831 REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
04c76b41 832 REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,
6b47ee6e 833
dddca226 834 /* first byte is taken by user flags, shift it to not overlap */
93d2bcd2 835 REQ_F_FAIL_BIT = 8,
6b47ee6e
PB
836 REQ_F_INFLIGHT_BIT,
837 REQ_F_CUR_POS_BIT,
838 REQ_F_NOWAIT_BIT,
6b47ee6e 839 REQ_F_LINK_TIMEOUT_BIT,
99bc4c38 840 REQ_F_NEED_CLEANUP_BIT,
d7718a9d 841 REQ_F_POLLED_BIT,
bcda7baa 842 REQ_F_BUFFER_SELECTED_BIT,
c7fb1942 843 REQ_F_BUFFER_RING_BIT,
e342c807 844 REQ_F_COMPLETE_INLINE_BIT,
230d50d4 845 REQ_F_REISSUE_BIT,
b8e64b53 846 REQ_F_CREDS_BIT,
20e60a38 847 REQ_F_REFCOUNT_BIT,
4d13d1a4 848 REQ_F_ARM_LTIMEOUT_BIT,
d886e185 849 REQ_F_ASYNC_DATA_BIT,
04c76b41 850 REQ_F_SKIP_LINK_CQES_BIT,
91eac1c6
JA
851 REQ_F_SINGLE_POLL_BIT,
852 REQ_F_DOUBLE_POLL_BIT,
8a3e8ee5 853 REQ_F_PARTIAL_IO_BIT,
227685eb 854 REQ_F_APOLL_MULTISHOT_BIT,
7b29f92d 855 /* keep async read/write and isreg together and in order */
35645ac3 856 REQ_F_SUPPORT_NOWAIT_BIT,
7b29f92d 857 REQ_F_ISREG_BIT,
84557871
JA
858
859 /* not a real bit, just to check we're not overflowing the space */
860 __REQ_F_LAST_BIT,
6b47ee6e
PB
861};
862
863enum {
864 /* ctx owns file */
865 REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
866 /* drain existing IO first */
867 REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
868 /* linked sqes */
869 REQ_F_LINK = BIT(REQ_F_LINK_BIT),
870 /* doesn't sever on completion < 0 */
871 REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
872 /* IOSQE_ASYNC */
873 REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
bcda7baa
JA
874 /* IOSQE_BUFFER_SELECT */
875 REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
04c76b41
PB
876 /* IOSQE_CQE_SKIP_SUCCESS */
877 REQ_F_CQE_SKIP = BIT(REQ_F_CQE_SKIP_BIT),
6b47ee6e 878
6b47ee6e 879 /* fail rest of links */
93d2bcd2 880 REQ_F_FAIL = BIT(REQ_F_FAIL_BIT),
b05a1bcd 881 /* on inflight list, should be cancelled and waited on exit reliably */
6b47ee6e
PB
882 REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
883 /* read/write uses file position */
884 REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
885 /* must not punt to workers */
886 REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
900fad45 887 /* has or had linked timeout */
6b47ee6e 888 REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
99bc4c38
PB
889 /* needs cleanup */
890 REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
d7718a9d
JA
891 /* already went through poll handler */
892 REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
bcda7baa
JA
893 /* buffer already selected */
894 REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
c7fb1942
JA
895 /* buffer selected from ring, needs commit */
896 REQ_F_BUFFER_RING = BIT(REQ_F_BUFFER_RING_BIT),
e342c807
PB
897 /* completion is deferred through io_comp_state */
898 REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
230d50d4
JA
899 /* caller should reissue async */
900 REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
35645ac3
PB
901 /* supports async reads/writes */
902 REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT),
7b29f92d
JA
903 /* regular file */
904 REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
b8e64b53
PB
905 /* has creds assigned */
906 REQ_F_CREDS = BIT(REQ_F_CREDS_BIT),
20e60a38
PB
907 /* skip refcounting if not set */
908 REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT),
4d13d1a4
PB
909 /* there is a linked timeout that has to be armed */
910 REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT),
d886e185
PB
911 /* ->async_data allocated */
912 REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT),
04c76b41
PB
913 /* don't post CQEs while failing linked requests */
914 REQ_F_SKIP_LINK_CQES = BIT(REQ_F_SKIP_LINK_CQES_BIT),
91eac1c6
JA
915 /* single poll may be active */
916 REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT),
917 /* double poll may active */
918 REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT),
8a3e8ee5
JA
919 /* request has already done partial IO */
920 REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT),
227685eb
HX
921 /* fast poll multishot mode */
922 REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT),
d7718a9d
JA
923};
924
925struct async_poll {
926 struct io_poll_iocb poll;
807abcb0 927 struct io_poll_iocb *double_poll;
6b47ee6e
PB
928};
929
f237c30a 930typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
5b0a6acc 931
7cbf1722 932struct io_task_work {
5b0a6acc
PB
933 union {
934 struct io_wq_work_node node;
935 struct llist_node fallback_node;
936 };
937 io_req_tw_func_t func;
7cbf1722
JA
938};
939
992da01a
PB
940enum {
941 IORING_RSRC_FILE = 0,
942 IORING_RSRC_BUFFER = 1,
943};
944
cef216fc
PB
945struct io_cqe {
946 __u64 user_data;
947 __s32 res;
948 /* fd initially, then cflags for completion */
949 union {
950 __u32 flags;
951 int fd;
952 };
953};
954
10988a0a
DY
955enum {
956 IO_CHECK_CQ_OVERFLOW_BIT,
155bc950 957 IO_CHECK_CQ_DROPPED_BIT,
10988a0a
DY
958};
959
09bb8394
JA
960/*
961 * NOTE! Each of the iocb union members has the file pointer
962 * as the first entry in their struct definition. So you can
963 * access the file pointer through any of the sub-structs,
63c36549 964 * or directly as just 'file' in this struct.
09bb8394 965 */
2b188cc1 966struct io_kiocb {
221c5eb2 967 union {
09bb8394 968 struct file *file;
9adbd45d 969 struct io_rw rw;
221c5eb2 970 struct io_poll_iocb poll;
9d805892 971 struct io_poll_update poll_update;
8ed8d3c3
JA
972 struct io_accept accept;
973 struct io_sync sync;
fbf23849 974 struct io_cancel cancel;
b29472ee 975 struct io_timeout timeout;
0bdf7a2d 976 struct io_timeout_rem timeout_rem;
3fbb51c1 977 struct io_connect connect;
e47293fd 978 struct io_sr_msg sr_msg;
15b71abe 979 struct io_open open;
b5dba59e 980 struct io_close close;
269bbe5f 981 struct io_rsrc_update rsrc_update;
4840e418 982 struct io_fadvise fadvise;
c1ca757b 983 struct io_madvise madvise;
3e4827b0 984 struct io_epoll epoll;
7d67af2c 985 struct io_splice splice;
ddf0322d 986 struct io_provide_buf pbuf;
1d9e1288 987 struct io_statx statx;
36f4fa68 988 struct io_shutdown shutdown;
80a261fd 989 struct io_rename rename;
14a1143b 990 struct io_unlink unlink;
e34a02dc 991 struct io_mkdir mkdir;
7a8721f8 992 struct io_symlink symlink;
cf30da90 993 struct io_hardlink hardlink;
4f57f06c 994 struct io_msg msg;
e9621e2b 995 struct io_xattr xattr;
1374e08e 996 struct io_socket sock;
2bb04df7 997 struct io_nop nop;
ee692a21 998 struct io_uring_cmd uring_cmd;
221c5eb2 999 };
2b188cc1 1000
d625c6ee 1001 u8 opcode;
65a6543d
XW
1002 /* polled IO has completed */
1003 u8 iopoll_completed;
1dbd023e
JA
1004 /*
1005 * Can be either a fixed buffer index, or used with provided buffers.
1006 * For the latter, before issue it points to the buffer group ID,
1007 * and after selection it points to the buffer ID itself.
1008 */
4f4eeba8 1009 u16 buf_index;
d17e56eb
PB
1010 unsigned int flags;
1011
cef216fc 1012 struct io_cqe cqe;
4f4eeba8 1013
010e8e6b 1014 struct io_ring_ctx *ctx;
010e8e6b 1015 struct task_struct *task;
d7718a9d 1016
c1bdf8ed 1017 struct io_rsrc_node *rsrc_node;
a4f8d94c
JA
1018
1019 union {
1020 /* store used ubuf, so we can prevent reloading */
1021 struct io_mapped_ubuf *imu;
1022
1023 /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
1024 struct io_buffer *kbuf;
c7fb1942
JA
1025
1026 /*
1027 * stores buffer ID for ring provided buffers, valid IFF
1028 * REQ_F_BUFFER_RING is set.
1029 */
1030 struct io_buffer_list *buf_list;
a4f8d94c 1031 };
fcb323cc 1032
2804ecd8
JA
1033 union {
1034 /* used by request caches, completion batching and iopoll */
1035 struct io_wq_work_node comp_list;
1036 /* cache ->apoll->events */
58f5c8d3 1037 __poll_t apoll_events;
2804ecd8 1038 };
d17e56eb 1039 atomic_t refs;
521d61fc 1040 atomic_t poll_refs;
5b0a6acc 1041 struct io_task_work io_task_work;
010e8e6b 1042 /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
4e5bc0a9
SR
1043 union {
1044 struct hlist_node hash_node;
1045 struct {
1046 u64 extra1;
1047 u64 extra2;
1048 };
1049 };
7e3709d5 1050 /* internal polling, see IORING_FEAT_FAST_POLL */
010e8e6b 1051 struct async_poll *apoll;
d886e185
PB
1052 /* opcode allocated if it needs to store data for async defer */
1053 void *async_data;
41cdcc22 1054 /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
34d2bfe7 1055 struct io_kiocb *link;
41cdcc22 1056 /* custom credentials, valid IFF REQ_F_CREDS is set */
521d61fc
JA
1057 const struct cred *creds;
1058 struct io_wq_work work;
2b188cc1 1059};
05589553 1060
13bf43f5
PB
1061struct io_tctx_node {
1062 struct list_head ctx_node;
1063 struct task_struct *task;
13bf43f5
PB
1064 struct io_ring_ctx *ctx;
1065};
1066
27dc8338
PB
1067struct io_defer_entry {
1068 struct list_head list;
1069 struct io_kiocb *req;
9cf7c104 1070 u32 seq;
2b188cc1
JA
1071};
1072
b21432b4
JA
1073struct io_cancel_data {
1074 struct io_ring_ctx *ctx;
4bf94615
JA
1075 union {
1076 u64 data;
1077 struct file *file;
1078 };
8e29da69
JA
1079 u32 flags;
1080 int seq;
b21432b4
JA
1081};
1082
/*
 * The URING_CMD payload starts at 'cmd' in the first sqe, and continues into
 * the following sqe if SQE128 is used.
 */
#define uring_cmd_pdu_size(is_sqe128)				\
	((1 + !!(is_sqe128)) * sizeof(struct io_uring_sqe) -	\
		offsetof(struct io_uring_sqe, cmd))

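/*
 * Worked example (assuming the 5.19 uapi layout, where 'cmd' starts at byte
 * 48 of the 64-byte SQE): the macro evaluates to 64 - 48 == 16 bytes for a
 * normal SQE, and to (2 * 64) - 48 == 80 bytes when IORING_SETUP_SQE128 is
 * in use.
 */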
d3656344 1091struct io_op_def {
d3656344
JA
1092 /* needs req->file assigned */
1093 unsigned needs_file : 1;
6d63416d
PB
1094 /* should block plug */
1095 unsigned plug : 1;
d3656344
JA
1096 /* hash wq insertion if file is a regular file */
1097 unsigned hash_reg_file : 1;
1098 /* unbound wq insertion if file is a non-regular file */
1099 unsigned unbound_nonreg_file : 1;
8a72758c
JA
1100 /* set if opcode supports polled "wait" */
1101 unsigned pollin : 1;
1102 unsigned pollout : 1;
52dd8640 1103 unsigned poll_exclusive : 1;
bcda7baa
JA
1104 /* op supports buffer selection */
1105 unsigned buffer_select : 1;
26f0505a
PB
1106 /* do prep async if is going to be punted */
1107 unsigned needs_async_setup : 1;
6d63416d
PB
1108 /* opcode is not supported by this kernel */
1109 unsigned not_supported : 1;
5bd2182d
PM
1110 /* skip auditing */
1111 unsigned audit_skip : 1;
73911426
JA
1112 /* supports ioprio */
1113 unsigned ioprio : 1;
1114 /* supports iopoll */
1115 unsigned iopoll : 1;
e8c2bc1f
JA
1116 /* size of async data needed, if any */
1117 unsigned short async_size;
d3656344
JA
1118};
1119
0918682b 1120static const struct io_op_def io_op_defs[] = {
73911426
JA
1121 [IORING_OP_NOP] = {
1122 .audit_skip = 1,
1123 .iopoll = 1,
3d200242 1124 .buffer_select = 1,
73911426 1125 },
0463b6c5 1126 [IORING_OP_READV] = {
d3656344
JA
1127 .needs_file = 1,
1128 .unbound_nonreg_file = 1,
8a72758c 1129 .pollin = 1,
4d954c25 1130 .buffer_select = 1,
26f0505a 1131 .needs_async_setup = 1,
27926b68 1132 .plug = 1,
5bd2182d 1133 .audit_skip = 1,
73911426
JA
1134 .ioprio = 1,
1135 .iopoll = 1,
e8c2bc1f 1136 .async_size = sizeof(struct io_async_rw),
d3656344 1137 },
0463b6c5 1138 [IORING_OP_WRITEV] = {
d3656344
JA
1139 .needs_file = 1,
1140 .hash_reg_file = 1,
1141 .unbound_nonreg_file = 1,
8a72758c 1142 .pollout = 1,
26f0505a 1143 .needs_async_setup = 1,
27926b68 1144 .plug = 1,
5bd2182d 1145 .audit_skip = 1,
73911426
JA
1146 .ioprio = 1,
1147 .iopoll = 1,
e8c2bc1f 1148 .async_size = sizeof(struct io_async_rw),
d3656344 1149 },
0463b6c5 1150 [IORING_OP_FSYNC] = {
d3656344 1151 .needs_file = 1,
5bd2182d 1152 .audit_skip = 1,
d3656344 1153 },
0463b6c5 1154 [IORING_OP_READ_FIXED] = {
d3656344
JA
1155 .needs_file = 1,
1156 .unbound_nonreg_file = 1,
8a72758c 1157 .pollin = 1,
27926b68 1158 .plug = 1,
5bd2182d 1159 .audit_skip = 1,
73911426
JA
1160 .ioprio = 1,
1161 .iopoll = 1,
e8c2bc1f 1162 .async_size = sizeof(struct io_async_rw),
d3656344 1163 },
0463b6c5 1164 [IORING_OP_WRITE_FIXED] = {
d3656344
JA
1165 .needs_file = 1,
1166 .hash_reg_file = 1,
1167 .unbound_nonreg_file = 1,
8a72758c 1168 .pollout = 1,
27926b68 1169 .plug = 1,
5bd2182d 1170 .audit_skip = 1,
73911426
JA
1171 .ioprio = 1,
1172 .iopoll = 1,
e8c2bc1f 1173 .async_size = sizeof(struct io_async_rw),
d3656344 1174 },
0463b6c5 1175 [IORING_OP_POLL_ADD] = {
d3656344
JA
1176 .needs_file = 1,
1177 .unbound_nonreg_file = 1,
5bd2182d
PM
1178 .audit_skip = 1,
1179 },
1180 [IORING_OP_POLL_REMOVE] = {
1181 .audit_skip = 1,
d3656344 1182 },
0463b6c5 1183 [IORING_OP_SYNC_FILE_RANGE] = {
d3656344 1184 .needs_file = 1,
5bd2182d 1185 .audit_skip = 1,
d3656344 1186 },
0463b6c5 1187 [IORING_OP_SENDMSG] = {
d3656344
JA
1188 .needs_file = 1,
1189 .unbound_nonreg_file = 1,
8a72758c 1190 .pollout = 1,
26f0505a 1191 .needs_async_setup = 1,
e8c2bc1f 1192 .async_size = sizeof(struct io_async_msghdr),
d3656344 1193 },
0463b6c5 1194 [IORING_OP_RECVMSG] = {
d3656344
JA
1195 .needs_file = 1,
1196 .unbound_nonreg_file = 1,
8a72758c 1197 .pollin = 1,
52de1fe1 1198 .buffer_select = 1,
26f0505a 1199 .needs_async_setup = 1,
e8c2bc1f 1200 .async_size = sizeof(struct io_async_msghdr),
d3656344 1201 },
0463b6c5 1202 [IORING_OP_TIMEOUT] = {
5bd2182d 1203 .audit_skip = 1,
e8c2bc1f 1204 .async_size = sizeof(struct io_timeout_data),
d3656344 1205 },
9c8e11b3
PB
1206 [IORING_OP_TIMEOUT_REMOVE] = {
1207 /* used by timeout updates' prep() */
5bd2182d 1208 .audit_skip = 1,
9c8e11b3 1209 },
0463b6c5 1210 [IORING_OP_ACCEPT] = {
d3656344
JA
1211 .needs_file = 1,
1212 .unbound_nonreg_file = 1,
8a72758c 1213 .pollin = 1,
52dd8640 1214 .poll_exclusive = 1,
4e86a2c9 1215 .ioprio = 1, /* used for flags */
d3656344 1216 },
5bd2182d
PM
1217 [IORING_OP_ASYNC_CANCEL] = {
1218 .audit_skip = 1,
1219 },
0463b6c5 1220 [IORING_OP_LINK_TIMEOUT] = {
5bd2182d 1221 .audit_skip = 1,
e8c2bc1f 1222 .async_size = sizeof(struct io_timeout_data),
d3656344 1223 },
0463b6c5 1224 [IORING_OP_CONNECT] = {
d3656344
JA
1225 .needs_file = 1,
1226 .unbound_nonreg_file = 1,
8a72758c 1227 .pollout = 1,
26f0505a 1228 .needs_async_setup = 1,
e8c2bc1f 1229 .async_size = sizeof(struct io_async_connect),
d3656344 1230 },
0463b6c5 1231 [IORING_OP_FALLOCATE] = {
d3656344 1232 .needs_file = 1,
d3656344 1233 },
44526bed
JA
1234 [IORING_OP_OPENAT] = {},
1235 [IORING_OP_CLOSE] = {},
5bd2182d
PM
1236 [IORING_OP_FILES_UPDATE] = {
1237 .audit_skip = 1,
73911426 1238 .iopoll = 1,
5bd2182d
PM
1239 },
1240 [IORING_OP_STATX] = {
1241 .audit_skip = 1,
1242 },
0463b6c5 1243 [IORING_OP_READ] = {
3a6820f2
JA
1244 .needs_file = 1,
1245 .unbound_nonreg_file = 1,
8a72758c 1246 .pollin = 1,
bcda7baa 1247 .buffer_select = 1,
27926b68 1248 .plug = 1,
5bd2182d 1249 .audit_skip = 1,
73911426
JA
1250 .ioprio = 1,
1251 .iopoll = 1,
e8c2bc1f 1252 .async_size = sizeof(struct io_async_rw),
3a6820f2 1253 },
0463b6c5 1254 [IORING_OP_WRITE] = {
3a6820f2 1255 .needs_file = 1,
7b3188e7 1256 .hash_reg_file = 1,
3a6820f2 1257 .unbound_nonreg_file = 1,
8a72758c 1258 .pollout = 1,
27926b68 1259 .plug = 1,
5bd2182d 1260 .audit_skip = 1,
73911426
JA
1261 .ioprio = 1,
1262 .iopoll = 1,
e8c2bc1f 1263 .async_size = sizeof(struct io_async_rw),
3a6820f2 1264 },
0463b6c5 1265 [IORING_OP_FADVISE] = {
4840e418 1266 .needs_file = 1,
5bd2182d 1267 .audit_skip = 1,
c1ca757b 1268 },
44526bed 1269 [IORING_OP_MADVISE] = {},
0463b6c5 1270 [IORING_OP_SEND] = {
fddaface
JA
1271 .needs_file = 1,
1272 .unbound_nonreg_file = 1,
8a72758c 1273 .pollout = 1,
5bd2182d 1274 .audit_skip = 1,
fddaface 1275 },
0463b6c5 1276 [IORING_OP_RECV] = {
fddaface
JA
1277 .needs_file = 1,
1278 .unbound_nonreg_file = 1,
8a72758c 1279 .pollin = 1,
bcda7baa 1280 .buffer_select = 1,
5bd2182d 1281 .audit_skip = 1,
fddaface 1282 },
0463b6c5 1283 [IORING_OP_OPENAT2] = {
cebdb986 1284 },
3e4827b0
JA
1285 [IORING_OP_EPOLL_CTL] = {
1286 .unbound_nonreg_file = 1,
5bd2182d 1287 .audit_skip = 1,
3e4827b0 1288 },
7d67af2c
PB
1289 [IORING_OP_SPLICE] = {
1290 .needs_file = 1,
1291 .hash_reg_file = 1,
1292 .unbound_nonreg_file = 1,
5bd2182d
PM
1293 .audit_skip = 1,
1294 },
1295 [IORING_OP_PROVIDE_BUFFERS] = {
1296 .audit_skip = 1,
73911426 1297 .iopoll = 1,
5bd2182d
PM
1298 },
1299 [IORING_OP_REMOVE_BUFFERS] = {
1300 .audit_skip = 1,
73911426 1301 .iopoll = 1,
ddf0322d 1302 },
f2a8d5c7
PB
1303 [IORING_OP_TEE] = {
1304 .needs_file = 1,
1305 .hash_reg_file = 1,
1306 .unbound_nonreg_file = 1,
5bd2182d 1307 .audit_skip = 1,
f2a8d5c7 1308 },
36f4fa68
JA
1309 [IORING_OP_SHUTDOWN] = {
1310 .needs_file = 1,
1311 },
44526bed
JA
1312 [IORING_OP_RENAMEAT] = {},
1313 [IORING_OP_UNLINKAT] = {},
e34a02dc 1314 [IORING_OP_MKDIRAT] = {},
7a8721f8 1315 [IORING_OP_SYMLINKAT] = {},
cf30da90 1316 [IORING_OP_LINKAT] = {},
4f57f06c
JA
1317 [IORING_OP_MSG_RING] = {
1318 .needs_file = 1,
73911426 1319 .iopoll = 1,
4f57f06c 1320 },
e9621e2b
SR
1321 [IORING_OP_FSETXATTR] = {
1322 .needs_file = 1
1323 },
1324 [IORING_OP_SETXATTR] = {},
a56834e0
SR
1325 [IORING_OP_FGETXATTR] = {
1326 .needs_file = 1
1327 },
1328 [IORING_OP_GETXATTR] = {},
1374e08e
JA
1329 [IORING_OP_SOCKET] = {
1330 .audit_skip = 1,
1331 },
ee692a21
JA
1332 [IORING_OP_URING_CMD] = {
1333 .needs_file = 1,
1334 .plug = 1,
1335 .needs_async_setup = 1,
1336 .async_size = uring_cmd_pdu_size(1),
1337 },
d3656344
JA
1338};
1339
/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)

7a612350 1344static bool io_disarm_next(struct io_kiocb *req);
eef51daa 1345static void io_uring_del_tctx_node(unsigned long index);
9936c7c2
PB
1346static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
1347 struct task_struct *task,
3dd0c97a 1348 bool cancel_all);
78cc687b 1349static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
1ffc5422 1350
4e118cd9 1351static void __io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags);
c7dae4ba 1352static void io_dismantle_req(struct io_kiocb *req);
94ae5e77 1353static void io_queue_linked_timeout(struct io_kiocb *req);
fdecb662 1354static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
c3bdad02 1355 struct io_uring_rsrc_update2 *up,
98f0b3b4 1356 unsigned nr_args);
68fb8979 1357static void io_clean_op(struct io_kiocb *req);
5106dd6e
JA
1358static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
1359 unsigned issue_flags);
ee67ba3b 1360static struct file *io_file_get_normal(struct io_kiocb *req, int fd);
cbc2e203 1361static void io_queue_sqe(struct io_kiocb *req);
269bbe5f 1362static void io_rsrc_put_work(struct work_struct *work);
de0617e4 1363
907d1df3 1364static void io_req_task_queue(struct io_kiocb *req);
c450178d 1365static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
179ae0d1 1366static int io_req_prep_async(struct io_kiocb *req);
de0617e4 1367
b9445598
PB
1368static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
1369 unsigned int issue_flags, u32 slot_index);
a7c41b46
XW
1370static int __io_close_fixed(struct io_kiocb *req, unsigned int issue_flags,
1371 unsigned int offset);
1372static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
7df778be 1373
f1042b6c 1374static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
9aa8dfde 1375static void io_eventfd_signal(struct io_ring_ctx *ctx);
4e118cd9 1376static void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
b9445598 1377
2b188cc1
JA
1378static struct kmem_cache *req_cachep;
1379
0918682b 1380static const struct file_operations io_uring_fops;
2b188cc1 1381
33337d03
DY
1382const char *io_uring_get_opcode(u8 opcode)
1383{
1384 switch ((enum io_uring_op)opcode) {
1385 case IORING_OP_NOP:
1386 return "NOP";
1387 case IORING_OP_READV:
1388 return "READV";
1389 case IORING_OP_WRITEV:
1390 return "WRITEV";
1391 case IORING_OP_FSYNC:
1392 return "FSYNC";
1393 case IORING_OP_READ_FIXED:
1394 return "READ_FIXED";
1395 case IORING_OP_WRITE_FIXED:
1396 return "WRITE_FIXED";
1397 case IORING_OP_POLL_ADD:
1398 return "POLL_ADD";
1399 case IORING_OP_POLL_REMOVE:
1400 return "POLL_REMOVE";
1401 case IORING_OP_SYNC_FILE_RANGE:
1402 return "SYNC_FILE_RANGE";
1403 case IORING_OP_SENDMSG:
1404 return "SENDMSG";
1405 case IORING_OP_RECVMSG:
1406 return "RECVMSG";
1407 case IORING_OP_TIMEOUT:
1408 return "TIMEOUT";
1409 case IORING_OP_TIMEOUT_REMOVE:
1410 return "TIMEOUT_REMOVE";
1411 case IORING_OP_ACCEPT:
1412 return "ACCEPT";
1413 case IORING_OP_ASYNC_CANCEL:
1414 return "ASYNC_CANCEL";
1415 case IORING_OP_LINK_TIMEOUT:
1416 return "LINK_TIMEOUT";
1417 case IORING_OP_CONNECT:
1418 return "CONNECT";
1419 case IORING_OP_FALLOCATE:
1420 return "FALLOCATE";
1421 case IORING_OP_OPENAT:
1422 return "OPENAT";
1423 case IORING_OP_CLOSE:
1424 return "CLOSE";
1425 case IORING_OP_FILES_UPDATE:
1426 return "FILES_UPDATE";
1427 case IORING_OP_STATX:
1428 return "STATX";
1429 case IORING_OP_READ:
1430 return "READ";
1431 case IORING_OP_WRITE:
1432 return "WRITE";
1433 case IORING_OP_FADVISE:
1434 return "FADVISE";
1435 case IORING_OP_MADVISE:
1436 return "MADVISE";
1437 case IORING_OP_SEND:
1438 return "SEND";
1439 case IORING_OP_RECV:
1440 return "RECV";
1441 case IORING_OP_OPENAT2:
1442 return "OPENAT2";
1443 case IORING_OP_EPOLL_CTL:
1444 return "EPOLL_CTL";
1445 case IORING_OP_SPLICE:
1446 return "SPLICE";
1447 case IORING_OP_PROVIDE_BUFFERS:
1448 return "PROVIDE_BUFFERS";
1449 case IORING_OP_REMOVE_BUFFERS:
1450 return "REMOVE_BUFFERS";
1451 case IORING_OP_TEE:
1452 return "TEE";
1453 case IORING_OP_SHUTDOWN:
1454 return "SHUTDOWN";
1455 case IORING_OP_RENAMEAT:
1456 return "RENAMEAT";
1457 case IORING_OP_UNLINKAT:
1458 return "UNLINKAT";
1459 case IORING_OP_MKDIRAT:
1460 return "MKDIRAT";
1461 case IORING_OP_SYMLINKAT:
1462 return "SYMLINKAT";
1463 case IORING_OP_LINKAT:
1464 return "LINKAT";
1465 case IORING_OP_MSG_RING:
1466 return "MSG_RING";
1467 case IORING_OP_FSETXATTR:
1468 return "FSETXATTR";
1469 case IORING_OP_SETXATTR:
1470 return "SETXATTR";
1471 case IORING_OP_FGETXATTR:
1472 return "FGETXATTR";
1473 case IORING_OP_GETXATTR:
1474 return "GETXATTR";
1475 case IORING_OP_SOCKET:
1476 return "SOCKET";
ee692a21
JA
1477 case IORING_OP_URING_CMD:
1478 return "URING_CMD";
33337d03
DY
1479 case IORING_OP_LAST:
1480 return "INVALID";
1481 }
1482 return "INVALID";
1483}
1484
2b188cc1
JA
1485struct sock *io_uring_get_socket(struct file *file)
1486{
1487#if defined(CONFIG_UNIX)
1488 if (file->f_op == &io_uring_fops) {
1489 struct io_ring_ctx *ctx = file->private_data;
1490
1491 return ctx->ring_sock->sk;
1492 }
1493#endif
1494 return NULL;
1495}
1496EXPORT_SYMBOL(io_uring_get_socket);

#if defined(CONFIG_UNIX)
static inline bool io_file_need_scm(struct file *filp)
{
#if defined(IO_URING_SCM_ALL)
	return true;
#else
	return !!unix_get_socket(filp);
#endif
}
#else
static inline bool io_file_need_scm(struct file *filp)
{
	return false;
}
#endif
1513
f8929630
PB
1514static void io_ring_submit_unlock(struct io_ring_ctx *ctx, unsigned issue_flags)
1515{
1516 lockdep_assert_held(&ctx->uring_lock);
1517 if (issue_flags & IO_URING_F_UNLOCKED)
1518 mutex_unlock(&ctx->uring_lock);
1519}
1520
1521static void io_ring_submit_lock(struct io_ring_ctx *ctx, unsigned issue_flags)
1522{
1523 /*
1524 * "Normal" inline submissions always hold the uring_lock, since we
1525 * grab it from the system call. Same is true for the SQPOLL offload.
1526 * The only exception is when we've detached the request and issue it
1527 * from an async worker thread, grab the lock for that case.
1528 */
1529 if (issue_flags & IO_URING_F_UNLOCKED)
1530 mutex_lock(&ctx->uring_lock);
1531 lockdep_assert_held(&ctx->uring_lock);
1532}
1533
f237c30a
PB
1534static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
1535{
1536 if (!*locked) {
1537 mutex_lock(&ctx->uring_lock);
1538 *locked = true;
1539 }
1540}
1541
#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

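/*
 * Usage example: walking a submission chain to test a per-request flag,
 * mirroring what io_match_task() does below:
 *
 *	io_for_each_link(req, head) {
 *		if (req->flags & REQ_F_INFLIGHT)
 *			return true;
 *	}
 */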
/*
 * Shamelessly stolen from the mm implementation of page reference checking,
 * see commit f958d7b528b1 for details.
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
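/*
 * In other words: after the unsigned add of 127, only counts that were 0 or
 * had recently wrapped below zero (0xffffff81..0xffffffff) land in the
 * 0..127 window, so the check catches both a dropped-to-zero and an
 * underflowed reference count.
 */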
1551
1552static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
1553{
20e60a38 1554 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
21c843d5
PB
1555 return atomic_inc_not_zero(&req->refs);
1556}
1557
21c843d5
PB
1558static inline bool req_ref_put_and_test(struct io_kiocb *req)
1559{
20e60a38
PB
1560 if (likely(!(req->flags & REQ_F_REFCOUNT)))
1561 return true;
1562
21c843d5
PB
1563 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1564 return atomic_dec_and_test(&req->refs);
1565}
1566
21c843d5
PB
1567static inline void req_ref_get(struct io_kiocb *req)
1568{
20e60a38 1569 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
21c843d5
PB
1570 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1571 atomic_inc(&req->refs);
1572}
1573
c450178d
PB
1574static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
1575{
6f33b0bc 1576 if (!wq_list_empty(&ctx->submit_state.compl_reqs))
c450178d
PB
1577 __io_submit_flush_completions(ctx);
1578}
1579
48dcd38d 1580static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
20e60a38
PB
1581{
1582 if (!(req->flags & REQ_F_REFCOUNT)) {
1583 req->flags |= REQ_F_REFCOUNT;
48dcd38d 1584 atomic_set(&req->refs, nr);
20e60a38
PB
1585 }
1586}
1587
48dcd38d
PB
1588static inline void io_req_set_refcount(struct io_kiocb *req)
1589{
1590 __io_req_set_refcount(req, 1);
1591}
1592
ab409402
PB
1593#define IO_RSRC_REF_BATCH 100
1594
25a15d3c
PB
1595static void io_rsrc_put_node(struct io_rsrc_node *node, int nr)
1596{
1597 percpu_ref_put_many(&node->refs, nr);
1598}
1599
ab409402
PB
1600static inline void io_req_put_rsrc_locked(struct io_kiocb *req,
1601 struct io_ring_ctx *ctx)
1602 __must_hold(&ctx->uring_lock)
36f72fe2 1603{
c1bdf8ed 1604 struct io_rsrc_node *node = req->rsrc_node;
ab409402 1605
c1bdf8ed
PB
1606 if (node) {
1607 if (node == ctx->rsrc_node)
ab409402
PB
1608 ctx->rsrc_cached_refs++;
1609 else
25a15d3c 1610 io_rsrc_put_node(node, 1);
ab409402
PB
1611 }
1612}
1613
7ac1edc4 1614static inline void io_req_put_rsrc(struct io_kiocb *req)
ab409402 1615{
c1bdf8ed 1616 if (req->rsrc_node)
25a15d3c 1617 io_rsrc_put_node(req->rsrc_node, 1);
ab409402
PB
1618}
1619
1620static __cold void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
1621 __must_hold(&ctx->uring_lock)
1622{
1623 if (ctx->rsrc_cached_refs) {
25a15d3c 1624 io_rsrc_put_node(ctx->rsrc_node, ctx->rsrc_cached_refs);
ab409402
PB
1625 ctx->rsrc_cached_refs = 0;
1626 }
1627}
1628
static void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	ctx->rsrc_cached_refs += IO_RSRC_REF_BATCH;
	percpu_ref_get_many(&ctx->rsrc_node->refs, IO_RSRC_REF_BATCH);
}
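/*
 * The helpers above implement a simple batching scheme: under ->uring_lock
 * the context pre-holds up to IO_RSRC_REF_BATCH percpu references on the
 * current rsrc node, and requests take or return them by adjusting
 * ->rsrc_cached_refs instead of touching the percpu counter; only when the
 * cache runs dry (or is dropped) does it fall back to percpu_ref operations.
 */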
36f72fe2 1635
a46be971 1636static inline void io_req_set_rsrc_node(struct io_kiocb *req,
5106dd6e
JA
1637 struct io_ring_ctx *ctx,
1638 unsigned int issue_flags)
36f72fe2 1639{
c1bdf8ed
PB
1640 if (!req->rsrc_node) {
1641 req->rsrc_node = ctx->rsrc_node;
5106dd6e
JA
1642
1643 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1644 lockdep_assert_held(&ctx->uring_lock);
1645 ctx->rsrc_cached_refs--;
1646 if (unlikely(ctx->rsrc_cached_refs < 0))
1647 io_rsrc_refs_refill(ctx);
1648 } else {
c1bdf8ed 1649 percpu_ref_get(&req->rsrc_node->refs);
5106dd6e 1650 }
36f72fe2
PB
1651 }
1652}
1653
cc3cec83 1654static unsigned int __io_put_kbuf(struct io_kiocb *req, struct list_head *list)
3648e526 1655{
c7fb1942
JA
1656 if (req->flags & REQ_F_BUFFER_RING) {
1657 if (req->buf_list)
1658 req->buf_list->head++;
1659 req->flags &= ~REQ_F_BUFFER_RING;
1660 } else {
1661 list_add(&req->kbuf->list, list);
1662 req->flags &= ~REQ_F_BUFFER_SELECTED;
1663 }
3648e526 1664
1dbd023e 1665 return IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
3648e526
HX
1666}
1667
cc3cec83 1668static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
3648e526 1669{
8197b053
PB
1670 lockdep_assert_held(&req->ctx->completion_lock);
1671
c7fb1942 1672 if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
3648e526 1673 return 0;
cc3cec83
JA
1674 return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
1675}
1676
1677static inline unsigned int io_put_kbuf(struct io_kiocb *req,
1678 unsigned issue_flags)
1679{
1680 unsigned int cflags;
1681
c7fb1942 1682 if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
cc3cec83
JA
1683 return 0;
1684
1685 /*
1686 * We can add this buffer back to two lists:
1687 *
1688 * 1) The io_buffers_cache list. This one is protected by the
1689 * ctx->uring_lock. If we already hold this lock, add back to this
1690 * list as we can grab it from issue as well.
1691 * 2) The io_buffers_comp list. This one is protected by the
1692 * ctx->completion_lock.
1693 *
1694 * We migrate buffers from the comp_list to the issue cache list
1695 * when we need one.
1696 */
c7fb1942
JA
1697 if (req->flags & REQ_F_BUFFER_RING) {
1698 /* no buffers to recycle for this case */
1699 cflags = __io_put_kbuf(req, NULL);
1700 } else if (issue_flags & IO_URING_F_UNLOCKED) {
cc3cec83
JA
1701 struct io_ring_ctx *ctx = req->ctx;
1702
1703 spin_lock(&ctx->completion_lock);
1704 cflags = __io_put_kbuf(req, &ctx->io_buffers_comp);
1705 spin_unlock(&ctx->completion_lock);
1706 } else {
ab0ac095
PB
1707 lockdep_assert_held(&req->ctx->uring_lock);
1708
cc3cec83
JA
1709 cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache);
1710 }
1711
1712 return cflags;
3648e526
HX
1713}
1714
dbc7d452
JA
1715static struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
1716 unsigned int bgid)
1717{
9cfc7e94
JA
1718 if (ctx->io_bl && bgid < BGID_ARRAY)
1719 return &ctx->io_bl[bgid];
dbc7d452 1720
9cfc7e94 1721 return xa_load(&ctx->io_bl_xa, bgid);
dbc7d452
JA
1722}
1723
4d55f238 1724static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
b1c62645
JA
1725{
1726 struct io_ring_ctx *ctx = req->ctx;
dbc7d452
JA
1727 struct io_buffer_list *bl;
1728 struct io_buffer *buf;
b1c62645 1729
c7fb1942 1730 if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
b1c62645 1731 return;
42db0c00
HX
1732 /*
1733 * For legacy provided buffer mode, don't recycle if we already did
1734 * IO to this buffer. For ring-mapped provided buffer mode, we should
1735 * increment ring->head to explicitly monopolize the buffer to avoid
1736 * multiple use.
1737 */
1738 if ((req->flags & REQ_F_BUFFER_SELECTED) &&
1739 (req->flags & REQ_F_PARTIAL_IO))
8a3e8ee5 1740 return;
42db0c00 1741
c7fb1942
JA
1742 /*
1743 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
1744 * the flag and hence ensure that bl->head doesn't get incremented.
1745 * If the tail has already been incremented, hang on to it.
1746 */
1747 if (req->flags & REQ_F_BUFFER_RING) {
1748 if (req->buf_list) {
42db0c00
HX
1749 if (req->flags & REQ_F_PARTIAL_IO) {
1750 req->buf_list->head++;
1751 req->buf_list = NULL;
1752 } else {
1753 req->buf_index = req->buf_list->bgid;
1754 req->flags &= ~REQ_F_BUFFER_RING;
1755 }
c7fb1942
JA
1756 }
1757 return;
1758 }
b1c62645 1759
f8929630 1760 io_ring_submit_lock(ctx, issue_flags);
b1c62645
JA
1761
1762 buf = req->kbuf;
dbc7d452
JA
1763 bl = io_buffer_get_list(ctx, buf->bgid);
1764 list_add(&buf->list, &bl->buf_list);
b1c62645 1765 req->flags &= ~REQ_F_BUFFER_SELECTED;
1dbd023e 1766 req->buf_index = buf->bgid;
4d55f238 1767
f8929630 1768 io_ring_submit_unlock(ctx, issue_flags);
b1c62645
JA
1769}
1770
3dd0c97a
PB
1771static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
1772 bool cancel_all)
6af3f48b 1773 __must_hold(&req->ctx->timeout_lock)
08d23634 1774{
9cae36a0
JA
1775 struct io_kiocb *req;
1776
68207680 1777 if (task && head->task != task)
08d23634 1778 return false;
9cae36a0
JA
1779 if (cancel_all)
1780 return true;
1781
1782 io_for_each_link(req, head) {
1783 if (req->flags & REQ_F_INFLIGHT)
1784 return true;
1785 }
1786 return false;
1787}
1788
1789static bool io_match_linked(struct io_kiocb *head)
1790{
1791 struct io_kiocb *req;
1792
1793 io_for_each_link(req, head) {
1794 if (req->flags & REQ_F_INFLIGHT)
1795 return true;
1796 }
1797 return false;
6af3f48b
PB
1798}
1799
1800/*
1801 * As io_match_task() but protected against racing with linked timeouts.
1802 * User must not hold timeout_lock.
1803 */
1804static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
1805 bool cancel_all)
1806{
9cae36a0
JA
1807 bool matched;
1808
6af3f48b
PB
1809 if (task && head->task != task)
1810 return false;
9cae36a0
JA
1811 if (cancel_all)
1812 return true;
1813
1814 if (head->flags & REQ_F_LINK_TIMEOUT) {
1815 struct io_ring_ctx *ctx = head->ctx;
1816
1817 /* protect against races with linked timeouts */
1818 spin_lock_irq(&ctx->timeout_lock);
1819 matched = io_match_linked(head);
1820 spin_unlock_irq(&ctx->timeout_lock);
1821 } else {
1822 matched = io_match_linked(head);
1823 }
1824 return matched;
6af3f48b
PB
1825}
1826
d886e185
PB
1827static inline bool req_has_async_data(struct io_kiocb *req)
1828{
1829 return req->flags & REQ_F_ASYNC_DATA;
1830}
1831
93d2bcd2 1832static inline void req_set_fail(struct io_kiocb *req)
c40f6379 1833{
93d2bcd2 1834 req->flags |= REQ_F_FAIL;
04c76b41
PB
1835 if (req->flags & REQ_F_CQE_SKIP) {
1836 req->flags &= ~REQ_F_CQE_SKIP;
1837 req->flags |= REQ_F_SKIP_LINK_CQES;
1838 }
c40f6379 1839}
4a38aed2 1840
a8295b98
HX
1841static inline void req_fail_link_node(struct io_kiocb *req, int res)
1842{
1843 req_set_fail(req);
cef216fc 1844 req->cqe.res = res;
a8295b98
HX
1845}
1846
fa05457a
PB
1847static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
1848{
1849 wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
a8295b98
HX
1850}
1851
c072481d 1852static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
2b188cc1
JA
1853{
1854 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1855
0f158b4c 1856 complete(&ctx->ref_comp);
2b188cc1
JA
1857}
1858
8eb7e2d0
PB
1859static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1860{
1861 return !req->timeout.off;
1862}
1863
c072481d 1864static __cold void io_fallback_req_func(struct work_struct *work)
f56165e6
PB
1865{
1866 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
1867 fallback_work.work);
1868 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
1869 struct io_kiocb *req, *tmp;
f237c30a 1870 bool locked = false;
f56165e6
PB
1871
1872 percpu_ref_get(&ctx->refs);
1873 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
f237c30a 1874 req->io_task_work.func(req, &locked);
5636c00d 1875
f237c30a 1876 if (locked) {
c450178d 1877 io_submit_flush_completions(ctx);
f237c30a
PB
1878 mutex_unlock(&ctx->uring_lock);
1879 }
f56165e6
PB
1880 percpu_ref_put(&ctx->refs);
1881}
1882
c072481d 1883static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
2b188cc1
JA
1884{
1885 struct io_ring_ctx *ctx;
9cfc7e94 1886 int hash_bits;
2b188cc1
JA
1887
1888 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1889 if (!ctx)
1890 return NULL;
1891
9cfc7e94
JA
1892 xa_init(&ctx->io_bl_xa);
1893
78076bb6
JA
1894 /*
1895 * Use 5 bits less than the max cq entries, that should give us around
1896 * 32 entries per hash list if totally full and uniformly spread.
1897 */
1898 hash_bits = ilog2(p->cq_entries);
1899 hash_bits -= 5;
1900 if (hash_bits <= 0)
1901 hash_bits = 1;
1902 ctx->cancel_hash_bits = hash_bits;
1903 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1904 GFP_KERNEL);
1905 if (!ctx->cancel_hash)
1906 goto err;
1907 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1908
6224843d
PB
1909 ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
1910 if (!ctx->dummy_ubuf)
1911 goto err;
1912 /* set invalid range, so io_import_fixed() fails meeting it */
1913 ctx->dummy_ubuf->ubuf = -1UL;
1914
21482896 1915 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
206aefde
JA
1916 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1917 goto err;
2b188cc1
JA
1918
1919 ctx->flags = p->flags;
90554200 1920 init_waitqueue_head(&ctx->sqo_sq_wait);
69fb2131 1921 INIT_LIST_HEAD(&ctx->sqd_list);
1d7bb1d5 1922 INIT_LIST_HEAD(&ctx->cq_overflow_list);
cc3cec83 1923 INIT_LIST_HEAD(&ctx->io_buffers_cache);
4d9237e3 1924 INIT_LIST_HEAD(&ctx->apoll_cache);
0f158b4c 1925 init_completion(&ctx->ref_comp);
61cf9370 1926 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
2b188cc1 1927 mutex_init(&ctx->uring_lock);
311997b3 1928 init_waitqueue_head(&ctx->cq_wait);
2b188cc1 1929 spin_lock_init(&ctx->completion_lock);
89850fce 1930 spin_lock_init(&ctx->timeout_lock);
5eef4e87 1931 INIT_WQ_LIST(&ctx->iopoll_list);
cc3cec83
JA
1932 INIT_LIST_HEAD(&ctx->io_buffers_pages);
1933 INIT_LIST_HEAD(&ctx->io_buffers_comp);
de0617e4 1934 INIT_LIST_HEAD(&ctx->defer_list);
5262f567 1935 INIT_LIST_HEAD(&ctx->timeout_list);
ef9dd637 1936 INIT_LIST_HEAD(&ctx->ltimeout_list);
d67d2263
BM
1937 spin_lock_init(&ctx->rsrc_ref_lock);
1938 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
269bbe5f
BM
1939 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1940 init_llist_head(&ctx->rsrc_put_llist);
13bf43f5 1941 INIT_LIST_HEAD(&ctx->tctx_list);
c2b6c6bc
PB
1942 ctx->submit_state.free_list.next = NULL;
1943 INIT_WQ_LIST(&ctx->locked_free_list);
9011bf9a 1944 INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
6f33b0bc 1945 INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
2b188cc1 1946 return ctx;
206aefde 1947err:
6224843d 1948 kfree(ctx->dummy_ubuf);
78076bb6 1949 kfree(ctx->cancel_hash);
9cfc7e94
JA
1950 kfree(ctx->io_bl);
1951 xa_destroy(&ctx->io_bl_xa);
206aefde
JA
1952 kfree(ctx);
1953 return NULL;
2b188cc1
JA
1954}
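
/*
 * A standalone arithmetic check (plain userspace C, not kernel code) of the
 * cancel_hash sizing in io_ring_ctx_alloc() above: with
 * hash_bits = ilog2(cq_entries) - 5, a completely full CQ spreads out to
 * roughly 32 entries per hash bucket.
 */
#include <stdio.h>

unsigned int demo_cancel_hash_bits(unsigned int cq_entries)
{
	int hash_bits = 31 - __builtin_clz(cq_entries);	/* ilog2() */

	hash_bits -= 5;
	return hash_bits <= 0 ? 1 : hash_bits;
}

int main(void)
{
	unsigned int cq_entries = 4096;
	unsigned int bits = demo_cancel_hash_bits(cq_entries);

	/* 4096 CQEs -> 7 bits -> 128 buckets -> 32 entries per bucket */
	printf("buckets=%u per-bucket=%u\n", 1U << bits, cq_entries >> bits);
	return 0;
}
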
1955
8f6ed49a
PB
1956static void io_account_cq_overflow(struct io_ring_ctx *ctx)
1957{
1958 struct io_rings *r = ctx->rings;
1959
1960 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
1961 ctx->cq_extra--;
1962}
1963
9cf7c104 1964static bool req_need_defer(struct io_kiocb *req, u32 seq)
7adf4eaf 1965{
2bc9930e
JA
1966 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1967 struct io_ring_ctx *ctx = req->ctx;
a197f664 1968
8f6ed49a 1969 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
2bc9930e 1970 }
de0617e4 1971
9d858b21 1972 return false;
de0617e4
JA
1973}
1974
c97d8a0f
PB
1975static inline bool io_req_ffs_set(struct io_kiocb *req)
1976{
35645ac3 1977 return req->flags & REQ_F_FIXED_FILE;
c97d8a0f
PB
1978}
1979
9cae36a0
JA
1980static inline void io_req_track_inflight(struct io_kiocb *req)
1981{
1982 if (!(req->flags & REQ_F_INFLIGHT)) {
1983 req->flags |= REQ_F_INFLIGHT;
1984 atomic_inc(&current->io_uring->inflight_tracked);
1985 }
1986}
1987
fd08e530
PB
1988static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
1989{
906c6caa
PB
1990 if (WARN_ON_ONCE(!req->link))
1991 return NULL;
1992
4d13d1a4
PB
1993 req->flags &= ~REQ_F_ARM_LTIMEOUT;
1994 req->flags |= REQ_F_LINK_TIMEOUT;
fd08e530
PB
1995
1996 /* linked timeouts should have two refs once prep'ed */
48dcd38d 1997 io_req_set_refcount(req);
4d13d1a4
PB
1998 __io_req_set_refcount(req->link, 2);
1999 return req->link;
fd08e530
PB
2000}
2001
2002static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
2003{
4d13d1a4 2004 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
fd08e530
PB
2005 return NULL;
2006 return __io_prep_linked_timeout(req);
2007}
2008
cb2d344c
PB
2009static noinline void __io_arm_ltimeout(struct io_kiocb *req)
2010{
2011 io_queue_linked_timeout(__io_prep_linked_timeout(req));
2012}
2013
2014static inline void io_arm_ltimeout(struct io_kiocb *req)
2015{
2016 if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
2017 __io_arm_ltimeout(req);
2018}
2019
1e6fa521
JA
2020static void io_prep_async_work(struct io_kiocb *req)
2021{
2022 const struct io_op_def *def = &io_op_defs[req->opcode];
1e6fa521
JA
2023 struct io_ring_ctx *ctx = req->ctx;
2024
b8e64b53
PB
2025 if (!(req->flags & REQ_F_CREDS)) {
2026 req->flags |= REQ_F_CREDS;
c10d1f98 2027 req->creds = get_current_cred();
b8e64b53 2028 }
003e8dcc 2029
e1d675df
PB
2030 req->work.list.next = NULL;
2031 req->work.flags = 0;
8e29da69 2032 req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
feaadc4f
PB
2033 if (req->flags & REQ_F_FORCE_ASYNC)
2034 req->work.flags |= IO_WQ_WORK_CONCURRENT;
2035
1e6fa521
JA
2036 if (req->flags & REQ_F_ISREG) {
2037 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
2038 io_wq_hash_work(&req->work, file_inode(req->file));
4b982bd0 2039 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
1e6fa521
JA
2040 if (def->unbound_nonreg_file)
2041 req->work.flags |= IO_WQ_WORK_UNBOUND;
2042 }
561fb04a 2043}
cccf0ee8 2044
cbdcb435 2045static void io_prep_async_link(struct io_kiocb *req)
561fb04a 2046{
cbdcb435 2047 struct io_kiocb *cur;
54a91f3b 2048
44eff40a
PB
2049 if (req->flags & REQ_F_LINK_TIMEOUT) {
2050 struct io_ring_ctx *ctx = req->ctx;
2051
674ee8e1 2052 spin_lock_irq(&ctx->timeout_lock);
44eff40a
PB
2053 io_for_each_link(cur, req)
2054 io_prep_async_work(cur);
674ee8e1 2055 spin_unlock_irq(&ctx->timeout_lock);
44eff40a
PB
2056 } else {
2057 io_for_each_link(cur, req)
2058 io_prep_async_work(cur);
2059 }
561fb04a
JA
2060}
2061
fff4e40e
PB
2062static inline void io_req_add_compl_list(struct io_kiocb *req)
2063{
775a1f2f 2064 struct io_submit_state *state = &req->ctx->submit_state;
fff4e40e 2065
3d4aeb9f 2066 if (!(req->flags & REQ_F_CQE_SKIP))
775a1f2f 2067 state->flush_cqes = true;
fff4e40e
PB
2068 wq_list_add_tail(&req->comp_list, &state->compl_reqs);
2069}
2070
77955efb 2071static void io_queue_iowq(struct io_kiocb *req, bool *dont_use)
561fb04a 2072{
cbdcb435 2073 struct io_kiocb *link = io_prep_linked_timeout(req);
5aa75ed5 2074 struct io_uring_task *tctx = req->task->io_uring;
561fb04a 2075
3bfe6106
JA
2076 BUG_ON(!tctx);
2077 BUG_ON(!tctx->io_wq);
561fb04a 2078
cbdcb435
PB
2079 /* init ->work of the whole link before punting */
2080 io_prep_async_link(req);
991468dc
JA
2081
2082 /*
2083 * Not expected to happen, but if we do have a bug where this _can_
2084 * happen, catch it here and ensure the request is marked as
2085 * canceled. That will make io-wq go through the usual work cancel
2086 * procedure rather than attempt to run this request (or create a new
2087 * worker for it).
2088 */
2089 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
2090 req->work.flags |= IO_WQ_WORK_CANCEL;
2091
971cf9c1
PB
2092 trace_io_uring_queue_async_work(req->ctx, req, req->cqe.user_data,
2093 req->opcode, req->flags, &req->work,
2094 io_wq_is_hashed(&req->work));
ebf93667 2095 io_wq_enqueue(tctx->io_wq, &req->work);
7271ef3a
JA
2096 if (link)
2097 io_queue_linked_timeout(link);
cbdcb435
PB
2098}
2099
1ee4160c 2100static void io_kill_timeout(struct io_kiocb *req, int status)
8c855885 2101 __must_hold(&req->ctx->completion_lock)
89850fce 2102 __must_hold(&req->ctx->timeout_lock)
5262f567 2103{
e8c2bc1f 2104 struct io_timeout_data *io = req->async_data;
5262f567 2105
fd9c7bc5 2106 if (hrtimer_try_to_cancel(&io->timer) != -1) {
2ae2eb9d
PB
2107 if (status)
2108 req_set_fail(req);
01cec8c1
PB
2109 atomic_set(&req->ctx->cq_timeouts,
2110 atomic_read(&req->ctx->cq_timeouts) + 1);
135fcde8 2111 list_del_init(&req->timeout.list);
4e118cd9 2112 io_req_tw_post_queue(req, status, 0);
5262f567
JA
2113 }
2114}
2115
c072481d 2116static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
de0617e4 2117{
441b8a78 2118 while (!list_empty(&ctx->defer_list)) {
27dc8338
PB
2119 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
2120 struct io_defer_entry, list);
de0617e4 2121
9cf7c104 2122 if (req_need_defer(de->req, de->seq))
04518945 2123 break;
27dc8338 2124 list_del_init(&de->list);
907d1df3 2125 io_req_task_queue(de->req);
27dc8338 2126 kfree(de);
441b8a78 2127 }
04518945
PB
2128}
2129
c072481d 2130static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
89850fce 2131 __must_hold(&ctx->completion_lock)
de0617e4 2132{
441b8a78 2133 u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
e677edbc 2134 struct io_kiocb *req, *tmp;
f010505b 2135
79ebeaee 2136 spin_lock_irq(&ctx->timeout_lock);
e677edbc 2137 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
f010505b 2138 u32 events_needed, events_got;
de0617e4 2139
8eb7e2d0 2140 if (io_is_timeout_noseq(req))
360428f8 2141 break;
f010505b
MDG
2142
2143 /*
2144 * Since seq can easily wrap around over time, subtract
2145 * the last seq at which timeouts were flushed before comparing.
2146 * Assuming not more than 2^31-1 events have happened since,
2147 * these subtractions won't have wrapped, so we can check if
2148 * target is in [last_seq, current_seq] by comparing the two.
2149 */
2150 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
2151 events_got = seq - ctx->cq_last_tm_flush;
2152 if (events_got < events_needed)
360428f8 2153 break;
bfe68a22 2154
1ee4160c 2155 io_kill_timeout(req, 0);
f18ee4cf 2156 }
f010505b 2157 ctx->cq_last_tm_flush = seq;
79ebeaee 2158 spin_unlock_irq(&ctx->timeout_lock);
360428f8 2159}
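
/*
 * A standalone sketch (userspace C, hypothetical demo_* names) of the
 * wrap-safe sequence comparison io_flush_timeouts() documents above:
 * subtracting the last flush point from both the target and the current
 * sequence keeps the comparison correct across a u32 wrap, provided fewer
 * than 2^31 events happened in between.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

bool demo_timeout_due(uint32_t target_seq, uint32_t cur_seq, uint32_t last_flush)
{
	uint32_t events_needed = target_seq - last_flush;
	uint32_t events_got = cur_seq - last_flush;

	return events_got >= events_needed;
}

int main(void)
{
	uint32_t last_flush = UINT32_MAX - 10;

	/* the target sat just before the wrap, the current sequence is past it */
	assert(demo_timeout_due(UINT32_MAX - 2, 5, last_flush));
	/* only 8 of the 100 required events have happened yet */
	assert(!demo_timeout_due(last_flush + 100, UINT32_MAX - 2, last_flush));
	return 0;
}
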
5262f567 2160
9333f6b4
PB
2161static inline void io_commit_cqring(struct io_ring_ctx *ctx)
2162{
2163 /* order cqe stores with ring update */
2164 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
2165}
2166
9aa8dfde 2167static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
360428f8 2168{
9aa8dfde
PB
2169 if (ctx->off_timeout_used || ctx->drain_active) {
2170 spin_lock(&ctx->completion_lock);
2171 if (ctx->off_timeout_used)
2172 io_flush_timeouts(ctx);
2173 if (ctx->drain_active)
2174 io_queue_deferred(ctx);
2175 io_commit_cqring(ctx);
2176 spin_unlock(&ctx->completion_lock);
2177 }
2178 if (ctx->has_evfd)
2179 io_eventfd_signal(ctx);
de0617e4
JA
2180}
2181
90554200
JA
2182static inline bool io_sqring_full(struct io_ring_ctx *ctx)
2183{
2184 struct io_rings *r = ctx->rings;
2185
a566c556 2186 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
90554200
JA
2187}
2188
888aae2e
PB
2189static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
2190{
2191 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
2192}
2193
d8da428b
PB
2194/*
2195 * writes to the cq entry need to come after reading head; the
2196 * control dependency is enough as we're using WRITE_ONCE to
2197 * fill the cq entry
2198 */
2199static noinline struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
2b188cc1 2200{
75b28aff 2201 struct io_rings *rings = ctx->rings;
d8da428b 2202 unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
2fee6bc6 2203 unsigned int shift = 0;
d8da428b
PB
2204 unsigned int free, queued, len;
2205
2fee6bc6
SR
2206 if (ctx->flags & IORING_SETUP_CQE32)
2207 shift = 1;
2208
d8da428b
PB
 2209 /* userspace may cheat by modifying the tail; be safe and do min */
2210 queued = min(__io_cqring_events(ctx), ctx->cq_entries);
2211 free = ctx->cq_entries - queued;
2212 /* we need a contiguous range, limit based on the current array offset */
2213 len = min(free, ctx->cq_entries - off);
2214 if (!len)
2b188cc1
JA
2215 return NULL;
2216
d8da428b
PB
2217 ctx->cached_cq_tail++;
2218 ctx->cqe_cached = &rings->cqes[off];
2219 ctx->cqe_sentinel = ctx->cqe_cached + len;
2fee6bc6
SR
2220 ctx->cqe_cached++;
2221 return &rings->cqes[off << shift];
d8da428b
PB
2222}
2223
2224static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
2225{
2226 if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
2fee6bc6
SR
2227 struct io_uring_cqe *cqe = ctx->cqe_cached;
2228
2229 if (ctx->flags & IORING_SETUP_CQE32) {
2230 unsigned int off = ctx->cqe_cached - ctx->rings->cqes;
2231
2232 cqe += off;
2233 }
2234
d8da428b 2235 ctx->cached_cq_tail++;
2fee6bc6
SR
2236 ctx->cqe_cached++;
2237 return cqe;
d8da428b 2238 }
2fee6bc6 2239
d8da428b 2240 return __io_get_cqe(ctx);
2b188cc1
JA
2241}
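
/*
 * A standalone sketch (userspace C, demo_* names are hypothetical) of the
 * cached-CQE window computed in __io_get_cqe() above, ignoring the CQE32
 * doubling: the run of entries that can be handed out without another check
 * is bounded both by the free space and by the distance to the end of the
 * power-of-two sized array.
 */
#include <stdio.h>

unsigned int demo_cqe_window(unsigned int head, unsigned int tail,
			     unsigned int cq_entries)
{
	unsigned int off = tail & (cq_entries - 1);
	unsigned int queued = tail - head;		/* unsigned wrap is fine */
	unsigned int free = cq_entries - queued;
	unsigned int to_end = cq_entries - off;

	return free < to_end ? free : to_end;
}

int main(void)
{
	/* 8-entry ring, 3 CQEs still unread, tail two slots from the end */
	printf("window=%u\n", demo_cqe_window(3, 6, 8));	/* prints 2 */
	return 0;
}
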
2242
77bc59b4 2243static void io_eventfd_signal(struct io_ring_ctx *ctx)
f2842ab5 2244{
77bc59b4
UA
2245 struct io_ev_fd *ev_fd;
2246
77bc59b4
UA
2247 rcu_read_lock();
2248 /*
 2249 * rcu_dereference ctx->io_ev_fd once and use it both for checking
2250 * and eventfd_signal
2251 */
2252 ev_fd = rcu_dereference(ctx->io_ev_fd);
2253
2254 /*
 2255 * Check again if ev_fd exists in case an io_eventfd_unregister call
2256 * completed between the NULL check of ctx->io_ev_fd at the start of
2257 * the function and rcu_read_lock.
2258 */
2259 if (unlikely(!ev_fd))
2260 goto out;
7e55a19c 2261 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
77bc59b4
UA
2262 goto out;
2263
c75312dd 2264 if (!ev_fd->eventfd_async || io_wq_current_is_worker())
77bc59b4 2265 eventfd_signal(ev_fd->cq_ev_fd, 1);
77bc59b4
UA
2266out:
2267 rcu_read_unlock();
f2842ab5
JA
2268}
2269
9aa8dfde
PB
2270static inline void io_cqring_wake(struct io_ring_ctx *ctx)
2271{
2272 /*
2273 * wake_up_all() may seem excessive, but io_wake_function() and
2274 * io_should_wake() handle the termination of the loop and only
2275 * wake as many waiters as we need to.
2276 */
2277 if (wq_has_sleeper(&ctx->cq_wait))
2278 wake_up_all(&ctx->cq_wait);
2279}
2280
2c5d763c
JA
2281/*
2282 * This should only get called when at least one event has been posted.
2283 * Some applications rely on the eventfd notification count only changing
 2284 * IFF a new CQE has been added to the CQ ring. There's no dependency on a
2285 * 1:1 relationship between how many times this function is called (and
2286 * hence the eventfd count) and number of CQEs posted to the CQ ring.
2287 */
66fc25ca 2288static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
1d7bb1d5 2289{
9aa8dfde
PB
2290 if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
2291 ctx->has_evfd))
9333f6b4
PB
2292 __io_commit_cqring_flush(ctx);
2293
9aa8dfde 2294 io_cqring_wake(ctx);
1d7bb1d5
JA
2295}
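
/*
 * A minimal userspace sketch of the application side of the eventfd
 * signalling above, assuming liburing is available and omitting all error
 * handling: the application blocks on the eventfd and only then reaps CQEs,
 * relying on the behaviour documented before io_cqring_ev_posted().
 */
#include <liburing.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	uint64_t cnt;
	int efd;

	io_uring_queue_init(8, &ring, 0);
	efd = eventfd(0, 0);
	io_uring_register_eventfd(&ring, efd);

	/* submit a no-op just so something completes */
	io_uring_prep_nop(io_uring_get_sqe(&ring));
	io_uring_submit(&ring);

	read(efd, &cnt, sizeof(cnt));		/* blocks until a CQE is posted */
	while (io_uring_peek_cqe(&ring, &cqe) == 0)
		io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	close(efd);
	return 0;
}
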
2296
80c18e4a
PB
2297static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
2298{
9aa8dfde
PB
2299 if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
2300 ctx->has_evfd))
9333f6b4
PB
2301 __io_commit_cqring_flush(ctx);
2302
9aa8dfde
PB
2303 if (ctx->flags & IORING_SETUP_SQPOLL)
2304 io_cqring_wake(ctx);
80c18e4a
PB
2305}
2306
c4a2ed72 2307/* Returns true if there are no backlogged entries after the flush */
6c2450ae 2308static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
1d7bb1d5 2309{
b18032bb 2310 bool all_flushed, posted;
e45a3e05 2311 size_t cqe_size = sizeof(struct io_uring_cqe);
1d7bb1d5 2312
a566c556 2313 if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
e23de15f 2314 return false;
1d7bb1d5 2315
e45a3e05
SR
2316 if (ctx->flags & IORING_SETUP_CQE32)
2317 cqe_size <<= 1;
2318
b18032bb 2319 posted = false;
79ebeaee 2320 spin_lock(&ctx->completion_lock);
6c2450ae 2321 while (!list_empty(&ctx->cq_overflow_list)) {
d068b506 2322 struct io_uring_cqe *cqe = io_get_cqe(ctx);
6c2450ae 2323 struct io_overflow_cqe *ocqe;
e6c8aa9a 2324
1d7bb1d5
JA
2325 if (!cqe && !force)
2326 break;
6c2450ae
PB
2327 ocqe = list_first_entry(&ctx->cq_overflow_list,
2328 struct io_overflow_cqe, list);
2329 if (cqe)
e45a3e05 2330 memcpy(cqe, &ocqe->cqe, cqe_size);
6c2450ae 2331 else
8f6ed49a
PB
2332 io_account_cq_overflow(ctx);
2333
b18032bb 2334 posted = true;
6c2450ae
PB
2335 list_del(&ocqe->list);
2336 kfree(ocqe);
1d7bb1d5
JA
2337 }
2338
09e88404
PB
2339 all_flushed = list_empty(&ctx->cq_overflow_list);
2340 if (all_flushed) {
10988a0a 2341 clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
3a4b89a2 2342 atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
09e88404 2343 }
46930143 2344
60053be8 2345 io_commit_cqring(ctx);
79ebeaee 2346 spin_unlock(&ctx->completion_lock);
b18032bb
JA
2347 if (posted)
2348 io_cqring_ev_posted(ctx);
09e88404 2349 return all_flushed;
1d7bb1d5
JA
2350}
2351
90f67366 2352static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
6c503150 2353{
ca0a2651
JA
2354 bool ret = true;
2355
10988a0a 2356 if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
6c503150
PB
2357 /* iopoll syncs against uring_lock, not completion_lock */
2358 if (ctx->flags & IORING_SETUP_IOPOLL)
2359 mutex_lock(&ctx->uring_lock);
90f67366 2360 ret = __io_cqring_overflow_flush(ctx, false);
6c503150
PB
2361 if (ctx->flags & IORING_SETUP_IOPOLL)
2362 mutex_unlock(&ctx->uring_lock);
2363 }
ca0a2651
JA
2364
2365 return ret;
6c503150
PB
2366}
2367
9d170164 2368static void __io_put_task(struct task_struct *task, int nr)
6a290a14
PB
2369{
2370 struct io_uring_task *tctx = task->io_uring;
2371
9d170164
PB
2372 percpu_counter_sub(&tctx->inflight, nr);
2373 if (unlikely(atomic_read(&tctx->in_idle)))
2374 wake_up(&tctx->wait);
2375 put_task_struct_many(task, nr);
2376}
2377
 2378/* must be called somewhat shortly after putting a request */
2379static inline void io_put_task(struct task_struct *task, int nr)
2380{
2381 if (likely(task == current))
2382 task->io_uring->cached_refs += nr;
2383 else
2384 __io_put_task(task, nr);
6a290a14
PB
2385}
2386
9a10867a
PB
2387static void io_task_refs_refill(struct io_uring_task *tctx)
2388{
2389 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
2390
2391 percpu_counter_add(&tctx->inflight, refill);
2392 refcount_add(refill, &current->usage);
2393 tctx->cached_refs += refill;
2394}
2395
2396static inline void io_get_task_refs(int nr)
2397{
2398 struct io_uring_task *tctx = current->io_uring;
2399
2400 tctx->cached_refs -= nr;
2401 if (unlikely(tctx->cached_refs < 0))
2402 io_task_refs_refill(tctx);
2403}
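
/*
 * A standalone sketch (userspace C, demo_* names are hypothetical) of the
 * cached task-reference scheme above: individual requests only touch a
 * per-task counter, and the shared inflight counter is updated in big
 * batches when the local cache runs dry.
 */
#include <stdio.h>

#define DEMO_REFS_CACHE_NR	(1U << 10)

struct demo_tctx {
	long	shared_inflight;	/* stands in for the percpu counter */
	int	cached_refs;
	int	refills;
};

void demo_refs_refill(struct demo_tctx *t)
{
	int refill = -t->cached_refs + DEMO_REFS_CACHE_NR;

	t->shared_inflight += refill;	/* the one "expensive" update */
	t->cached_refs += refill;
	t->refills++;
}

void demo_get_refs(struct demo_tctx *t, int nr)
{
	t->cached_refs -= nr;
	if (t->cached_refs < 0)
		demo_refs_refill(t);
}

int main(void)
{
	struct demo_tctx t = { 0 };

	for (int i = 0; i < 3000; i++)
		demo_get_refs(&t, 1);
	/* 3000 gets cost only a handful of shared-counter updates */
	printf("refills=%d shared=%ld cached=%d\n",
	       t.refills, t.shared_inflight, t.cached_refs);
	return 0;
}
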
2404
3cc7fdb9
PB
2405static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
2406{
2407 struct io_uring_task *tctx = task->io_uring;
2408 unsigned int refs = tctx->cached_refs;
2409
2410 if (refs) {
2411 tctx->cached_refs = 0;
2412 percpu_counter_sub(&tctx->inflight, refs);
2413 put_task_struct_many(task, refs);
2414 }
2415}
2416
d4d19c19 2417static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
e45a3e05
SR
2418 s32 res, u32 cflags, u64 extra1,
2419 u64 extra2)
2b188cc1 2420{
cce4b8b0 2421 struct io_overflow_cqe *ocqe;
e45a3e05
SR
2422 size_t ocq_size = sizeof(struct io_overflow_cqe);
2423 bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
2b188cc1 2424
e45a3e05
SR
2425 if (is_cqe32)
2426 ocq_size += sizeof(struct io_uring_cqe);
2b188cc1 2427
e45a3e05 2428 ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
08dcd028 2429 trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
cce4b8b0
PB
2430 if (!ocqe) {
2431 /*
2432 * If we're in ring overflow flush mode, or in task cancel mode,
2433 * or cannot allocate an overflow entry, then we need to drop it
2434 * on the floor.
2435 */
8f6ed49a 2436 io_account_cq_overflow(ctx);
155bc950 2437 set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
cce4b8b0 2438 return false;
2b188cc1 2439 }
cce4b8b0 2440 if (list_empty(&ctx->cq_overflow_list)) {
10988a0a 2441 set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
3a4b89a2 2442 atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
20c0b380 2443
cce4b8b0 2444 }
d4d19c19 2445 ocqe->cqe.user_data = user_data;
cce4b8b0
PB
2446 ocqe->cqe.res = res;
2447 ocqe->cqe.flags = cflags;
e45a3e05
SR
2448 if (is_cqe32) {
2449 ocqe->cqe.big_cqe[0] = extra1;
2450 ocqe->cqe.big_cqe[1] = extra2;
2451 }
cce4b8b0
PB
2452 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
2453 return true;
2b188cc1
JA
2454}
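
/*
 * A standalone sketch (plain userspace C; demo_* types are hypothetical) of
 * the overflow path above: when the CQ ring has no free slot, the completion
 * is stashed in a heap-allocated side list and replayed into the ring later
 * rather than being dropped.  The allocation-failure accounting
 * (cq_overflow counter, IO_CHECK_CQ_DROPPED_BIT) is left out.
 */
#include <stdlib.h>
#include <string.h>

struct demo_cqe {
	unsigned long long	user_data;
	int			res;
	unsigned int		flags;
};

struct demo_ocqe {
	struct demo_ocqe	*next;
	struct demo_cqe		cqe;
};

struct demo_cq {
	struct demo_cqe		ring[4];
	unsigned int		head, tail;		/* occupancy = tail - head */
	struct demo_ocqe	*ofl_first, **ofl_last;	/* FIFO overflow list */
};

void demo_cq_init(struct demo_cq *cq)
{
	memset(cq, 0, sizeof(*cq));
	cq->ofl_last = &cq->ofl_first;
}

/* post a completion: use the ring if there is room, otherwise stash it */
void demo_post_cqe(struct demo_cq *cq, const struct demo_cqe *cqe)
{
	struct demo_ocqe *ocqe;

	if (cq->tail - cq->head < 4) {
		cq->ring[cq->tail++ & 3] = *cqe;
		return;
	}
	ocqe = malloc(sizeof(*ocqe));
	ocqe->cqe = *cqe;
	ocqe->next = NULL;
	*cq->ofl_last = ocqe;
	cq->ofl_last = &ocqe->next;
}

/* once the consumer has made room, move stashed completions back */
void demo_flush_overflow(struct demo_cq *cq)
{
	while (cq->ofl_first && cq->tail - cq->head < 4) {
		struct demo_ocqe *ocqe = cq->ofl_first;

		cq->ring[cq->tail++ & 3] = ocqe->cqe;
		cq->ofl_first = ocqe->next;
		if (!cq->ofl_first)
			cq->ofl_last = &cq->ofl_first;
		free(ocqe);
	}
}
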
2455
ae4da189 2456static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
913a571a 2457 s32 res, u32 cflags)
2b188cc1
JA
2458{
2459 struct io_uring_cqe *cqe;
2460
2461 /*
2462 * If we can't get a cq entry, userspace overflowed the
2463 * submission (by quite a lot). Increment the overflow count in
2464 * the ring.
2465 */
d068b506 2466 cqe = io_get_cqe(ctx);
1d7bb1d5 2467 if (likely(cqe)) {
d4d19c19 2468 WRITE_ONCE(cqe->user_data, user_data);
2b188cc1 2469 WRITE_ONCE(cqe->res, res);
bcda7baa 2470 WRITE_ONCE(cqe->flags, cflags);
8d13326e 2471 return true;
2b188cc1 2472 }
e45a3e05 2473 return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
2b188cc1
JA
2474}
2475
90e7c35f
PB
2476static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
2477 struct io_kiocb *req)
d5ec1dfa 2478{
90e7c35f
PB
2479 struct io_uring_cqe *cqe;
2480
2481 trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
c4bb964f 2482 req->cqe.res, req->cqe.flags, 0, 0);
90e7c35f
PB
2483
2484 /*
2485 * If we can't get a cq entry, userspace overflowed the
2486 * submission (by quite a lot). Increment the overflow count in
2487 * the ring.
2488 */
2489 cqe = io_get_cqe(ctx);
2490 if (likely(cqe)) {
2491 memcpy(cqe, &req->cqe, sizeof(*cqe));
2492 return true;
2493 }
2494 return io_cqring_event_overflow(ctx, req->cqe.user_data,
e45a3e05 2495 req->cqe.res, req->cqe.flags, 0, 0);
d5ec1dfa
SR
2496}
2497
91658798
SR
2498static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
2499 struct io_kiocb *req)
2500{
2501 struct io_uring_cqe *cqe;
2502 u64 extra1 = req->extra1;
2503 u64 extra2 = req->extra2;
2504
2505 trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
c4bb964f 2506 req->cqe.res, req->cqe.flags, extra1, extra2);
91658798
SR
2507
2508 /*
2509 * If we can't get a cq entry, userspace overflowed the
2510 * submission (by quite a lot). Increment the overflow count in
2511 * the ring.
2512 */
2513 cqe = io_get_cqe(ctx);
2514 if (likely(cqe)) {
2515 memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
2516 cqe->big_cqe[0] = extra1;
2517 cqe->big_cqe[1] = extra2;
2518 return true;
2519 }
2520
e45a3e05
SR
2521 return io_cqring_event_overflow(ctx, req->cqe.user_data, req->cqe.res,
2522 req->cqe.flags, extra1, extra2);
d5ec1dfa
SR
2523}
2524
ae4da189 2525static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
bcda7baa 2526{
c4bb964f 2527 trace_io_uring_complete(req->ctx, req, req->cqe.user_data, res, cflags, 0, 0);
cef216fc 2528 return __io_fill_cqe(req->ctx, req->cqe.user_data, res, cflags);
bcda7baa
JA
2529}
2530
91658798
SR
2531static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags,
2532 u64 extra1, u64 extra2)
2533{
2534 struct io_ring_ctx *ctx = req->ctx;
2535 struct io_uring_cqe *cqe;
2536
2537 if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_CQE32)))
2538 return;
2539 if (req->flags & REQ_F_CQE_SKIP)
2540 return;
2541
c4bb964f
SR
2542 trace_io_uring_complete(ctx, req, req->cqe.user_data, res, cflags,
2543 extra1, extra2);
91658798
SR
2544
2545 /*
2546 * If we can't get a cq entry, userspace overflowed the
2547 * submission (by quite a lot). Increment the overflow count in
2548 * the ring.
2549 */
2550 cqe = io_get_cqe(ctx);
2551 if (likely(cqe)) {
2552 WRITE_ONCE(cqe->user_data, req->cqe.user_data);
2553 WRITE_ONCE(cqe->res, res);
2554 WRITE_ONCE(cqe->flags, cflags);
2555 WRITE_ONCE(cqe->big_cqe[0], extra1);
2556 WRITE_ONCE(cqe->big_cqe[1], extra2);
2557 return;
2558 }
2559
e45a3e05 2560 io_cqring_event_overflow(ctx, req->cqe.user_data, res, cflags, extra1, extra2);
91658798
SR
2561}
2562
913a571a
PB
2563static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
2564 s32 res, u32 cflags)
bcda7baa 2565{
913a571a 2566 ctx->cq_extra++;
c4bb964f 2567 trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
ae4da189 2568 return __io_fill_cqe(ctx, user_data, res, cflags);
bcda7baa
JA
2569}
2570
effcf8bd 2571static void __io_req_complete_put(struct io_kiocb *req)
2b188cc1 2572{
c7dae4ba
JA
2573 /*
2574 * If we're the last reference to this request, add to our locked
2575 * free_list cache.
2576 */
de9b4cca 2577 if (req_ref_put_and_test(req)) {
effcf8bd
SR
2578 struct io_ring_ctx *ctx = req->ctx;
2579
da1a08c5 2580 if (req->flags & IO_REQ_LINK_FLAGS) {
0756a869 2581 if (req->flags & IO_DISARM_MASK)
7a612350
PB
2582 io_disarm_next(req);
2583 if (req->link) {
2584 io_req_task_queue(req->link);
2585 req->link = NULL;
2586 }
2587 }
7ac1edc4 2588 io_req_put_rsrc(req);
8197b053
PB
2589 /*
2590 * Selected buffer deallocation in io_clean_op() assumes that
2591 * we don't hold ->completion_lock. Clean them here to avoid
2592 * deadlocks.
2593 */
2594 io_put_kbuf_comp(req);
c7dae4ba
JA
2595 io_dismantle_req(req);
2596 io_put_task(req->task, 1);
c2b6c6bc 2597 wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
d0acdee2 2598 ctx->locked_free_nr++;
180f829f 2599 }
a37fae8a
HX
2600}
2601
effcf8bd
SR
2602static void __io_req_complete_post(struct io_kiocb *req, s32 res,
2603 u32 cflags)
2604{
2605 if (!(req->flags & REQ_F_CQE_SKIP))
2606 __io_fill_cqe_req(req, res, cflags);
2607 __io_req_complete_put(req);
2608}
2609
2610static void __io_req_complete_post32(struct io_kiocb *req, s32 res,
2611 u32 cflags, u64 extra1, u64 extra2)
2612{
2613 if (!(req->flags & REQ_F_CQE_SKIP))
2614 __io_fill_cqe32_req(req, res, cflags, extra1, extra2);
2615 __io_req_complete_put(req);
2616}
2617
2618static void io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags)
a37fae8a
HX
2619{
2620 struct io_ring_ctx *ctx = req->ctx;
2621
2622 spin_lock(&ctx->completion_lock);
2623 __io_req_complete_post(req, res, cflags);
7a612350 2624 io_commit_cqring(ctx);
79ebeaee 2625 spin_unlock(&ctx->completion_lock);
a3f34907 2626 io_cqring_ev_posted(ctx);
4e3d9ff9
JA
2627}
2628
effcf8bd
SR
2629static void io_req_complete_post32(struct io_kiocb *req, s32 res,
2630 u32 cflags, u64 extra1, u64 extra2)
2631{
2632 struct io_ring_ctx *ctx = req->ctx;
2633
2634 spin_lock(&ctx->completion_lock);
2635 __io_req_complete_post32(req, res, cflags, extra1, extra2);
2636 io_commit_cqring(ctx);
2637 spin_unlock(&ctx->completion_lock);
2638 io_cqring_ev_posted(ctx);
2639}
2640
54daa9b2
PB
2641static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
2642 u32 cflags)
229a7b63 2643{
cef216fc
PB
2644 req->cqe.res = res;
2645 req->cqe.flags = cflags;
e342c807 2646 req->flags |= REQ_F_COMPLETE_INLINE;
e1e16097
JA
2647}
2648
889fca73 2649static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
54daa9b2 2650 s32 res, u32 cflags)
bcda7baa 2651{
889fca73
PB
2652 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
2653 io_req_complete_state(req, res, cflags);
a38d68db 2654 else
c7dae4ba 2655 io_req_complete_post(req, res, cflags);
bcda7baa
JA
2656}
2657
effcf8bd
SR
2658static inline void __io_req_complete32(struct io_kiocb *req,
2659 unsigned int issue_flags, s32 res,
2660 u32 cflags, u64 extra1, u64 extra2)
2661{
2662 if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
2663 io_req_complete_state(req, res, cflags);
2664 req->extra1 = extra1;
2665 req->extra2 = extra2;
2666 } else {
2667 io_req_complete_post32(req, res, cflags, extra1, extra2);
2668 }
2669}
2670
54daa9b2 2671static inline void io_req_complete(struct io_kiocb *req, s32 res)
0ddf92e8 2672{
4ffaa94b
KJ
2673 if (res < 0)
2674 req_set_fail(req);
889fca73 2675 __io_req_complete(req, 0, res, 0);
0ddf92e8
JA
2676}
2677
54daa9b2 2678static void io_req_complete_failed(struct io_kiocb *req, s32 res)
f41db273 2679{
93d2bcd2 2680 req_set_fail(req);
ab0ac095 2681 io_req_complete_post(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
f41db273
PB
2682}
2683
864ea921
PB
2684/*
2685 * Don't initialise the fields below on every allocation, but do that in
2686 * advance and keep them valid across allocations.
2687 */
2688static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
2689{
2690 req->ctx = ctx;
2691 req->link = NULL;
2692 req->async_data = NULL;
2693 /* not necessary, but safer to zero */
cef216fc 2694 req->cqe.res = 0;
864ea921
PB
2695}
2696
dac7a098 2697static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
cd0ca2e0 2698 struct io_submit_state *state)
dac7a098 2699{
79ebeaee 2700 spin_lock(&ctx->completion_lock);
c2b6c6bc 2701 wq_list_splice(&ctx->locked_free_list, &state->free_list);
d0acdee2 2702 ctx->locked_free_nr = 0;
79ebeaee 2703 spin_unlock(&ctx->completion_lock);
dac7a098
PB
2704}
2705
88ab95be 2706static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
0ddf92e8 2707{
88ab95be 2708 return !ctx->submit_state.free_list.next;
0ddf92e8
JA
2709}
2710
5d5901a3
PB
2711/*
2712 * A request might get retired back into the request caches even before opcode
2713 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
2714 * Because of that, io_alloc_req() should be called only under ->uring_lock
2715 * and with extra caution to not get a request that is still worked on.
2716 */
c072481d 2717static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
5d5901a3 2718 __must_hold(&ctx->uring_lock)
2b188cc1 2719{
864ea921 2720 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
3ab665b7 2721 void *reqs[IO_REQ_ALLOC_BATCH];
864ea921 2722 int ret, i;
e5d1bc0a 2723
23a5c43b
PB
2724 /*
2725 * If we have more than a batch's worth of requests in our IRQ side
2726 * locked cache, grab the lock and move them over to our submission
2727 * side cache.
2728 */
a6d97a8a 2729 if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
23a5c43b 2730 io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
88ab95be 2731 if (!io_req_cache_empty(ctx))
23a5c43b
PB
2732 return true;
2733 }
e5d1bc0a 2734
3ab665b7 2735 ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
fd6fab2c 2736
864ea921
PB
2737 /*
2738 * Bulk alloc is all-or-nothing. If we fail to get a batch,
2739 * retry single alloc to be on the safe side.
2740 */
2741 if (unlikely(ret <= 0)) {
3ab665b7
PB
2742 reqs[0] = kmem_cache_alloc(req_cachep, gfp);
2743 if (!reqs[0])
a33ae9ce 2744 return false;
864ea921 2745 ret = 1;
2b188cc1 2746 }
864ea921 2747
37f0e767 2748 percpu_ref_get_many(&ctx->refs, ret);
3ab665b7 2749 for (i = 0; i < ret; i++) {
23a5c43b 2750 struct io_kiocb *req = reqs[i];
3ab665b7
PB
2751
2752 io_preinit_req(req, ctx);
fa05457a 2753 io_req_add_to_cache(req, ctx);
3ab665b7 2754 }
a33ae9ce
PB
2755 return true;
2756}
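
/*
 * A standalone sketch (userspace C, demo_* names are hypothetical) of the
 * "bulk alloc, fall back to a single object" pattern used above: the bulk
 * path is all-or-nothing, so a failed batch is retried as one allocation
 * before giving up.
 */
#include <stdlib.h>

#define DEMO_ALLOC_BATCH	8

/* stand-in for kmem_cache_alloc_bulk(): fills objs[] completely or not at all */
int demo_alloc_bulk(void **objs, int nr, size_t size)
{
	for (int i = 0; i < nr; i++) {
		objs[i] = malloc(size);
		if (!objs[i]) {
			while (i--)
				free(objs[i]);
			return 0;
		}
	}
	return nr;
}

/* returns how many objects ended up in objs[], possibly just one */
int demo_refill(void **objs, size_t size)
{
	int ret = demo_alloc_bulk(objs, DEMO_ALLOC_BATCH, size);

	if (ret <= 0) {
		objs[0] = malloc(size);	/* retry single alloc to be safe */
		if (!objs[0])
			return 0;
		ret = 1;
	}
	return ret;
}
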
2757
2758static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
2759{
88ab95be 2760 if (unlikely(io_req_cache_empty(ctx)))
a33ae9ce
PB
2761 return __io_alloc_req_refill(ctx);
2762 return true;
2763}
2764
2765static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
2766{
2767 struct io_wq_work_node *node;
2768
2769 node = wq_stack_extract(&ctx->submit_state.free_list);
c2b6c6bc 2770 return container_of(node, struct io_kiocb, comp_list);
2b188cc1
JA
2771}
2772
e1d767f0 2773static inline void io_put_file(struct file *file)
8da11c19 2774{
e1d767f0 2775 if (file)
8da11c19
PB
2776 fput(file);
2777}
2778
6b639522 2779static inline void io_dismantle_req(struct io_kiocb *req)
2b188cc1 2780{
094bae49 2781 unsigned int flags = req->flags;
929a3af9 2782
867f8fa5 2783 if (unlikely(flags & IO_REQ_CLEAN_FLAGS))
3a0a6902 2784 io_clean_op(req);
e1d767f0
PB
2785 if (!(flags & REQ_F_FIXED_FILE))
2786 io_put_file(req->file);
e65ef56d
JA
2787}
2788
f5c6cf2a 2789static __cold void io_free_req(struct io_kiocb *req)
c6ca97b3 2790{
51a4cc11 2791 struct io_ring_ctx *ctx = req->ctx;
c6ca97b3 2792
7ac1edc4 2793 io_req_put_rsrc(req);
216578e5 2794 io_dismantle_req(req);
7c660731 2795 io_put_task(req->task, 1);
c6ca97b3 2796
79ebeaee 2797 spin_lock(&ctx->completion_lock);
c2b6c6bc 2798 wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
c34b025f 2799 ctx->locked_free_nr++;
79ebeaee 2800 spin_unlock(&ctx->completion_lock);
e65ef56d
JA
2801}
2802
f2f87370
PB
2803static inline void io_remove_next_linked(struct io_kiocb *req)
2804{
2805 struct io_kiocb *nxt = req->link;
2806
2807 req->link = nxt->link;
2808 nxt->link = NULL;
2809}
2810
81ec803b 2811static struct io_kiocb *io_disarm_linked_timeout(struct io_kiocb *req)
33cc89a9 2812 __must_hold(&req->ctx->completion_lock)
89b263f6 2813 __must_hold(&req->ctx->timeout_lock)
2665abfd 2814{
33cc89a9 2815 struct io_kiocb *link = req->link;
f2f87370 2816
b97e736a 2817 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
c9abd7ad 2818 struct io_timeout_data *io = link->async_data;
7c86ffee 2819
f2f87370 2820 io_remove_next_linked(req);
90cd7e42 2821 link->timeout.head = NULL;
fd9c7bc5 2822 if (hrtimer_try_to_cancel(&io->timer) != -1) {
ef9dd637 2823 list_del(&link->timeout.list);
81ec803b 2824 return link;
c9abd7ad
PB
2825 }
2826 }
81ec803b 2827 return NULL;
7c86ffee
PB
2828}
2829
d148ca4b 2830static void io_fail_links(struct io_kiocb *req)
33cc89a9 2831 __must_hold(&req->ctx->completion_lock)
9e645e11 2832{
33cc89a9 2833 struct io_kiocb *nxt, *link = req->link;
04c76b41 2834 bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;
9e645e11 2835
f2f87370 2836 req->link = NULL;
f2f87370 2837 while (link) {
a8295b98
HX
2838 long res = -ECANCELED;
2839
2840 if (link->flags & REQ_F_FAIL)
cef216fc 2841 res = link->cqe.res;
a8295b98 2842
f2f87370
PB
2843 nxt = link->link;
2844 link->link = NULL;
2665abfd 2845
cef216fc 2846 trace_io_uring_fail_link(req->ctx, req, req->cqe.user_data,
502c87d6
SR
2847 req->opcode, link);
2848
4e118cd9
PB
2849 if (ignore_cqes)
2850 link->flags |= REQ_F_CQE_SKIP;
2851 else
04c76b41 2852 link->flags &= ~REQ_F_CQE_SKIP;
4e118cd9 2853 __io_req_complete_post(link, res, 0);
f2f87370 2854 link = nxt;
9e645e11 2855 }
33cc89a9 2856}
9e645e11 2857
33cc89a9
PB
2858static bool io_disarm_next(struct io_kiocb *req)
2859 __must_hold(&req->ctx->completion_lock)
2860{
81ec803b 2861 struct io_kiocb *link = NULL;
33cc89a9
PB
2862 bool posted = false;
2863
0756a869 2864 if (req->flags & REQ_F_ARM_LTIMEOUT) {
81ec803b 2865 link = req->link;
906c6caa 2866 req->flags &= ~REQ_F_ARM_LTIMEOUT;
0756a869
PB
2867 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
2868 io_remove_next_linked(req);
4e118cd9 2869 io_req_tw_post_queue(link, -ECANCELED, 0);
0756a869
PB
2870 posted = true;
2871 }
2872 } else if (req->flags & REQ_F_LINK_TIMEOUT) {
89b263f6
JA
2873 struct io_ring_ctx *ctx = req->ctx;
2874
2875 spin_lock_irq(&ctx->timeout_lock);
81ec803b 2876 link = io_disarm_linked_timeout(req);
89b263f6 2877 spin_unlock_irq(&ctx->timeout_lock);
81ec803b
PB
2878 if (link) {
2879 posted = true;
2880 io_req_tw_post_queue(link, -ECANCELED, 0);
2881 }
89b263f6 2882 }
93d2bcd2 2883 if (unlikely((req->flags & REQ_F_FAIL) &&
e4335ed3 2884 !(req->flags & REQ_F_HARDLINK))) {
33cc89a9
PB
2885 posted |= (req->link != NULL);
2886 io_fail_links(req);
2887 }
2888 return posted;
9e645e11
JA
2889}
2890
d81499bf
PB
2891static void __io_req_find_next_prep(struct io_kiocb *req)
2892{
2893 struct io_ring_ctx *ctx = req->ctx;
2894 bool posted;
2895
2896 spin_lock(&ctx->completion_lock);
2897 posted = io_disarm_next(req);
60053be8 2898 io_commit_cqring(ctx);
d81499bf
PB
2899 spin_unlock(&ctx->completion_lock);
2900 if (posted)
2901 io_cqring_ev_posted(ctx);
2902}
2903
2904static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
c69f8dbe 2905{
33cc89a9 2906 struct io_kiocb *nxt;
944e58bf 2907
9e645e11
JA
2908 /*
2909 * If LINK is set, we have dependent requests in this chain. If we
2910 * didn't fail this request, queue the first one up, moving any other
2911 * dependencies to the next request. In case of failure, fail the rest
2912 * of the chain.
2913 */
d81499bf
PB
2914 if (unlikely(req->flags & IO_DISARM_MASK))
2915 __io_req_find_next_prep(req);
33cc89a9
PB
2916 nxt = req->link;
2917 req->link = NULL;
2918 return nxt;
4d7dd462 2919}
9e645e11 2920
f237c30a 2921static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
2c32395d
PB
2922{
2923 if (!ctx)
2924 return;
ef060ea9
JA
2925 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
2926 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
f237c30a 2927 if (*locked) {
c450178d 2928 io_submit_flush_completions(ctx);
2c32395d 2929 mutex_unlock(&ctx->uring_lock);
f237c30a 2930 *locked = false;
2c32395d
PB
2931 }
2932 percpu_ref_put(&ctx->refs);
2933}
2934
f28c240e
HX
2935static inline void ctx_commit_and_unlock(struct io_ring_ctx *ctx)
2936{
2937 io_commit_cqring(ctx);
2938 spin_unlock(&ctx->completion_lock);
2939 io_cqring_ev_posted(ctx);
2940}
2941
2942static void handle_prev_tw_list(struct io_wq_work_node *node,
2943 struct io_ring_ctx **ctx, bool *uring_locked)
2944{
2945 if (*ctx && !*uring_locked)
2946 spin_lock(&(*ctx)->completion_lock);
2947
2948 do {
2949 struct io_wq_work_node *next = node->next;
2950 struct io_kiocb *req = container_of(node, struct io_kiocb,
2951 io_task_work.node);
2952
34d2bfe7
JA
2953 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
2954
f28c240e
HX
2955 if (req->ctx != *ctx) {
2956 if (unlikely(!*uring_locked && *ctx))
2957 ctx_commit_and_unlock(*ctx);
2958
2959 ctx_flush_and_put(*ctx, uring_locked);
2960 *ctx = req->ctx;
2961 /* if not contended, grab and improve batching */
2962 *uring_locked = mutex_trylock(&(*ctx)->uring_lock);
2963 percpu_ref_get(&(*ctx)->refs);
2964 if (unlikely(!*uring_locked))
2965 spin_lock(&(*ctx)->completion_lock);
2966 }
2967 if (likely(*uring_locked))
2968 req->io_task_work.func(req, uring_locked);
2969 else
cef216fc 2970 __io_req_complete_post(req, req->cqe.res,
cc3cec83 2971 io_put_kbuf_comp(req));
f28c240e
HX
2972 node = next;
2973 } while (node);
2974
2975 if (unlikely(!*uring_locked))
2976 ctx_commit_and_unlock(*ctx);
2977}
2978
2979static void handle_tw_list(struct io_wq_work_node *node,
2980 struct io_ring_ctx **ctx, bool *locked)
9f8d032a
HX
2981{
2982 do {
2983 struct io_wq_work_node *next = node->next;
2984 struct io_kiocb *req = container_of(node, struct io_kiocb,
2985 io_task_work.node);
2986
34d2bfe7
JA
2987 prefetch(container_of(next, struct io_kiocb, io_task_work.node));
2988
9f8d032a
HX
2989 if (req->ctx != *ctx) {
2990 ctx_flush_and_put(*ctx, locked);
2991 *ctx = req->ctx;
2992 /* if not contended, grab and improve batching */
2993 *locked = mutex_trylock(&(*ctx)->uring_lock);
2994 percpu_ref_get(&(*ctx)->refs);
2995 }
2996 req->io_task_work.func(req, locked);
2997 node = next;
2998 } while (node);
2999}
3000
7cbf1722 3001static void tctx_task_work(struct callback_head *cb)
c40f6379 3002{
f28c240e 3003 bool uring_locked = false;
ebd0df2e 3004 struct io_ring_ctx *ctx = NULL;
3f18407d
PB
3005 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
3006 task_work);
c40f6379 3007
16f72070 3008 while (1) {
f28c240e 3009 struct io_wq_work_node *node1, *node2;
3f18407d
PB
3010
3011 spin_lock_irq(&tctx->task_lock);
3fe07bcd 3012 node1 = tctx->prio_task_list.first;
f28c240e 3013 node2 = tctx->task_list.first;
3f18407d 3014 INIT_WQ_LIST(&tctx->task_list);
3fe07bcd 3015 INIT_WQ_LIST(&tctx->prio_task_list);
f28c240e 3016 if (!node2 && !node1)
6294f368 3017 tctx->task_running = false;
3f18407d 3018 spin_unlock_irq(&tctx->task_lock);
f28c240e 3019 if (!node2 && !node1)
6294f368 3020 break;
3f18407d 3021
f28c240e
HX
3022 if (node1)
3023 handle_prev_tw_list(node1, &ctx, &uring_locked);
f28c240e
HX
3024 if (node2)
3025 handle_tw_list(node2, &ctx, &uring_locked);
7cbf1722 3026 cond_resched();
68ca8fc0 3027
a6d97a8a 3028 if (data_race(!tctx->task_list.first) &&
3fe07bcd 3029 data_race(!tctx->prio_task_list.first) && uring_locked)
68ca8fc0 3030 io_submit_flush_completions(ctx);
3f18407d 3031 }
ebd0df2e 3032
f28c240e 3033 ctx_flush_and_put(ctx, &uring_locked);
3cc7fdb9
PB
3034
3035 /* relaxed read is enough as only the task itself sets ->in_idle */
3036 if (unlikely(atomic_read(&tctx->in_idle)))
3037 io_uring_drop_tctx_refs(current);
7cbf1722
JA
3038}
3039
3fe07bcd
JA
3040static void __io_req_task_work_add(struct io_kiocb *req,
3041 struct io_uring_task *tctx,
3042 struct io_wq_work_list *list)
7cbf1722 3043{
9f010507 3044 struct io_ring_ctx *ctx = req->ctx;
e09ee510 3045 struct io_wq_work_node *node;
0b81e80c 3046 unsigned long flags;
6294f368 3047 bool running;
7cbf1722 3048
0b81e80c 3049 spin_lock_irqsave(&tctx->task_lock, flags);
3fe07bcd 3050 wq_list_add_tail(&req->io_task_work.node, list);
6294f368
PB
3051 running = tctx->task_running;
3052 if (!running)
3053 tctx->task_running = true;
0b81e80c 3054 spin_unlock_irqrestore(&tctx->task_lock, flags);
7cbf1722
JA
3055
3056 /* task_work already pending, we're done */
6294f368 3057 if (running)
e09ee510 3058 return;
7cbf1722 3059
ef060ea9
JA
3060 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
3061 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
3062
3fe07bcd 3063 if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
e09ee510 3064 return;
2215bed9 3065
0b81e80c 3066 spin_lock_irqsave(&tctx->task_lock, flags);
6294f368 3067 tctx->task_running = false;
3fe07bcd 3068 node = wq_list_merge(&tctx->prio_task_list, &tctx->task_list);
0b81e80c 3069 spin_unlock_irqrestore(&tctx->task_lock, flags);
7cbf1722 3070
e09ee510
PB
3071 while (node) {
3072 req = container_of(node, struct io_kiocb, io_task_work.node);
3073 node = node->next;
3074 if (llist_add(&req->io_task_work.fallback_node,
3075 &req->ctx->fallback_llist))
3076 schedule_delayed_work(&req->ctx->fallback_work, 1);
3077 }
eab30c4d
PB
3078}
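
/*
 * A standalone sketch (userspace C with pthreads; demo_* names are
 * hypothetical) of the queueing discipline in __io_req_task_work_add()
 * above: work is appended to a per-task list under a lock, and only the
 * producer that finds the list idle issues the comparatively expensive
 * wakeup; everyone else just appends.  The consumer splices the whole list
 * out and marks itself idle when it comes up empty.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct demo_work {
	struct demo_work	*next;
};

struct demo_tctx {
	pthread_mutex_t		lock;
	struct demo_work	*first, **last;
	bool			running;
};

void demo_tctx_init(struct demo_tctx *t)
{
	pthread_mutex_init(&t->lock, NULL);
	t->first = NULL;
	t->last = &t->first;
	t->running = false;
}

/* returns true if the caller must also deliver the wakeup/notification */
bool demo_task_work_add(struct demo_tctx *t, struct demo_work *w)
{
	bool was_running;

	w->next = NULL;
	pthread_mutex_lock(&t->lock);
	*t->last = w;
	t->last = &w->next;
	was_running = t->running;
	t->running = true;
	pthread_mutex_unlock(&t->lock);

	return !was_running;	/* only the first producer notifies the task */
}

/* consumer: splice the whole list out, mark idle if it came up empty */
struct demo_work *demo_task_work_grab(struct demo_tctx *t)
{
	struct demo_work *list;

	pthread_mutex_lock(&t->lock);
	list = t->first;
	t->first = NULL;
	t->last = &t->first;
	if (!list)
		t->running = false;
	pthread_mutex_unlock(&t->lock);
	return list;
}
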
3079
3fe07bcd
JA
3080static void io_req_task_work_add(struct io_kiocb *req)
3081{
3082 struct io_uring_task *tctx = req->task->io_uring;
3083
3084 __io_req_task_work_add(req, tctx, &tctx->task_list);
3085}
3086
3087static void io_req_task_prio_work_add(struct io_kiocb *req)
3088{
3089 struct io_uring_task *tctx = req->task->io_uring;
3090
3091 if (req->ctx->flags & IORING_SETUP_SQPOLL)
3092 __io_req_task_work_add(req, tctx, &tctx->prio_task_list);
3093 else
3094 __io_req_task_work_add(req, tctx, &tctx->task_list);
3095}
3096
4e118cd9 3097static void io_req_tw_post(struct io_kiocb *req, bool *locked)
c40f6379 3098{
4e118cd9
PB
3099 io_req_complete_post(req, req->cqe.res, req->cqe.flags);
3100}
c40f6379 3101
4e118cd9
PB
3102static void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags)
3103{
3104 req->cqe.res = res;
3105 req->cqe.flags = cflags;
3106 req->io_task_work.func = io_req_tw_post;
3fe07bcd 3107 io_req_task_work_add(req);
4e118cd9
PB
3108}
3109
f237c30a 3110static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
c40f6379 3111{
b18a1a45 3112 /* not needed for normal modes, but SQPOLL depends on it */
971cf9c1 3113 io_tw_lock(req->ctx, locked);
cef216fc 3114 io_req_complete_failed(req, req->cqe.res);
c40f6379
JA
3115}
3116
f237c30a 3117static void io_req_task_submit(struct io_kiocb *req, bool *locked)
c40f6379 3118{
971cf9c1 3119 io_tw_lock(req->ctx, locked);
316319e8 3120 /* req->task == current here, checking PF_EXITING is safe */
af066f31 3121 if (likely(!(req->task->flags & PF_EXITING)))
cbc2e203 3122 io_queue_sqe(req);
81b6d05c 3123 else
2593553a 3124 io_req_complete_failed(req, -EFAULT);
c40f6379
JA
3125}
3126
2c4b8eb6 3127static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
c40f6379 3128{
cef216fc 3129 req->cqe.res = ret;
5b0a6acc 3130 req->io_task_work.func = io_req_task_cancel;
3fe07bcd 3131 io_req_task_work_add(req);
c40f6379
JA
3132}
3133
2c4b8eb6 3134static void io_req_task_queue(struct io_kiocb *req)
a3df7698 3135{
5b0a6acc 3136 req->io_task_work.func = io_req_task_submit;
3fe07bcd 3137 io_req_task_work_add(req);
a3df7698
PB
3138}
3139
773af691
JA
3140static void io_req_task_queue_reissue(struct io_kiocb *req)
3141{
77955efb 3142 req->io_task_work.func = io_queue_iowq;
3fe07bcd 3143 io_req_task_work_add(req);
773af691
JA
3144}
3145
57859f4d 3146static void io_queue_next(struct io_kiocb *req)
c69f8dbe 3147{
9b5f7bd9 3148 struct io_kiocb *nxt = io_req_find_next(req);
944e58bf
PB
3149
3150 if (nxt)
906a8c3f 3151 io_req_task_queue(nxt);
c69f8dbe
JL
3152}
3153
3aa83bfb 3154static void io_free_batch_list(struct io_ring_ctx *ctx,
1cce17ac 3155 struct io_wq_work_node *node)
3aa83bfb 3156 __must_hold(&ctx->uring_lock)
5af1d13e 3157{
d4b7a5ef 3158 struct task_struct *task = NULL;
37f0e767 3159 int task_refs = 0;
5af1d13e 3160
3aa83bfb
PB
3161 do {
3162 struct io_kiocb *req = container_of(node, struct io_kiocb,
3163 comp_list);
2d6500d4 3164
a538be5b
PB
3165 if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
3166 if (req->flags & REQ_F_REFCOUNT) {
3167 node = req->comp_list.next;
3168 if (!req_ref_put_and_test(req))
3169 continue;
3170 }
b605a7fa
PB
3171 if ((req->flags & REQ_F_POLLED) && req->apoll) {
3172 struct async_poll *apoll = req->apoll;
3173
3174 if (apoll->double_poll)
3175 kfree(apoll->double_poll);
3176 list_add(&apoll->poll.wait.entry,
3177 &ctx->apoll_cache);
3178 req->flags &= ~REQ_F_POLLED;
3179 }
da1a08c5 3180 if (req->flags & IO_REQ_LINK_FLAGS)
57859f4d 3181 io_queue_next(req);
a538be5b
PB
3182 if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
3183 io_clean_op(req);
c1e53a69 3184 }
a538be5b
PB
3185 if (!(req->flags & REQ_F_FIXED_FILE))
3186 io_put_file(req->file);
2d6500d4 3187
ab409402 3188 io_req_put_rsrc_locked(req, ctx);
5af1d13e 3189
d4b7a5ef
PB
3190 if (req->task != task) {
3191 if (task)
3192 io_put_task(task, task_refs);
3193 task = req->task;
3194 task_refs = 0;
3195 }
3196 task_refs++;
c1e53a69 3197 node = req->comp_list.next;
fa05457a 3198 io_req_add_to_cache(req, ctx);
3aa83bfb 3199 } while (node);
d4b7a5ef 3200
d4b7a5ef
PB
3201 if (task)
3202 io_put_task(task, task_refs);
7a743e22
PB
3203}
3204
c450178d 3205static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
a141dd89 3206 __must_hold(&ctx->uring_lock)
905c172f 3207{
6f33b0bc 3208 struct io_wq_work_node *node, *prev;
cd0ca2e0 3209 struct io_submit_state *state = &ctx->submit_state;
905c172f 3210
3d4aeb9f
PB
3211 if (state->flush_cqes) {
3212 spin_lock(&ctx->completion_lock);
3213 wq_list_for_each(node, prev, &state->compl_reqs) {
3214 struct io_kiocb *req = container_of(node, struct io_kiocb,
6f33b0bc 3215 comp_list);
5182ed2e 3216
0e2e5c47
SR
3217 if (!(req->flags & REQ_F_CQE_SKIP)) {
3218 if (!(ctx->flags & IORING_SETUP_CQE32))
3219 __io_fill_cqe_req_filled(ctx, req);
3220 else
3221 __io_fill_cqe32_req_filled(ctx, req);
3222 }
3d4aeb9f
PB
3223 }
3224
3225 io_commit_cqring(ctx);
3226 spin_unlock(&ctx->completion_lock);
3227 io_cqring_ev_posted(ctx);
3228 state->flush_cqes = false;
905c172f 3229 }
5182ed2e 3230
1cce17ac 3231 io_free_batch_list(ctx, state->compl_reqs.first);
6f33b0bc 3232 INIT_WQ_LIST(&state->compl_reqs);
7a743e22
PB
3233}
3234
ba816ad6
JA
3235/*
3236 * Drop reference to request, return next in chain (if there is one) if this
3237 * was the last reference to this request.
3238 */
0d85035a 3239static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
e65ef56d 3240{
9b5f7bd9
PB
3241 struct io_kiocb *nxt = NULL;
3242
de9b4cca 3243 if (req_ref_put_and_test(req)) {
da1a08c5 3244 if (unlikely(req->flags & IO_REQ_LINK_FLAGS))
7819a1f6 3245 nxt = io_req_find_next(req);
f5c6cf2a 3246 io_free_req(req);
2a44f467 3247 }
9b5f7bd9 3248 return nxt;
2b188cc1
JA
3249}
3250
0d85035a 3251static inline void io_put_req(struct io_kiocb *req)
e65ef56d 3252{
f5c6cf2a
PB
3253 if (req_ref_put_and_test(req)) {
3254 io_queue_next(req);
e65ef56d 3255 io_free_req(req);
543af3a1 3256 }
216578e5
PB
3257}
3258
6c503150 3259static unsigned io_cqring_events(struct io_ring_ctx *ctx)
a3a0e43f
JA
3260{
3261 /* See comment at the top of this file */
3262 smp_rmb();
e23de15f 3263 return __io_cqring_events(ctx);
a3a0e43f
JA
3264}
3265
fb5ccc98
PB
3266static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
3267{
3268 struct io_rings *rings = ctx->rings;
3269
3270 /* make sure SQ entry isn't read before tail */
3271 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
3272}
3273
4c6e277c
JA
3274static inline bool io_run_task_work(void)
3275{
7f62d40d 3276 if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) {
4c6e277c 3277 __set_current_state(TASK_RUNNING);
7c5d8fa6
EB
3278 clear_notify_signal();
3279 if (task_work_pending(current))
3280 task_work_run();
4c6e277c
JA
3281 return true;
3282 }
3283
3284 return false;
bcda7baa
JA
3285}
3286
5ba3c874 3287static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
def596e9 3288{
5eef4e87 3289 struct io_wq_work_node *pos, *start, *prev;
d729cf9a 3290 unsigned int poll_flags = BLK_POLL_NOSLEEP;
b688f11e 3291 DEFINE_IO_COMP_BATCH(iob);
5ba3c874 3292 int nr_events = 0;
def596e9
JA
3293
3294 /*
3295 * Only spin for completions if we don't have multiple devices hanging
87a115fb 3296 * off our complete list.
def596e9 3297 */
87a115fb 3298 if (ctx->poll_multi_queue || force_nonspin)
ef99b2d3 3299 poll_flags |= BLK_POLL_ONESHOT;
def596e9 3300
5eef4e87
PB
3301 wq_list_for_each(pos, start, &ctx->iopoll_list) {
3302 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
9adbd45d 3303 struct kiocb *kiocb = &req->rw.kiocb;
a2416e1e 3304 int ret;
def596e9
JA
3305
3306 /*
581f9810
BM
3307 * Move completed and retryable entries to our local lists.
3308 * If we find a request that requires polling, break out
3309 * and complete those lists first, if we have entries there.
def596e9 3310 */
e3f721e6 3311 if (READ_ONCE(req->iopoll_completed))
def596e9
JA
3312 break;
3313
b688f11e 3314 ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags);
a2416e1e
PB
3315 if (unlikely(ret < 0))
3316 return ret;
3317 else if (ret)
ef99b2d3 3318 poll_flags |= BLK_POLL_ONESHOT;
def596e9 3319
3aadc23e 3320 /* iopoll may have completed current req */
b688f11e
JA
3321 if (!rq_list_empty(iob.req_list) ||
3322 READ_ONCE(req->iopoll_completed))
e3f721e6 3323 break;
def596e9
JA
3324 }
3325
b688f11e
JA
3326 if (!rq_list_empty(iob.req_list))
3327 iob.complete(&iob);
5eef4e87
PB
3328 else if (!pos)
3329 return 0;
def596e9 3330
5eef4e87
PB
3331 prev = start;
3332 wq_list_for_each_resume(pos, prev) {
3333 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
3334
b3fa03fd
PB
3335 /* order with io_complete_rw_iopoll(), e.g. ->result updates */
3336 if (!smp_load_acquire(&req->iopoll_completed))
e3f721e6 3337 break;
c0713540 3338 nr_events++;
83a13a41
PB
3339 if (unlikely(req->flags & REQ_F_CQE_SKIP))
3340 continue;
cef216fc 3341 __io_fill_cqe_req(req, req->cqe.res, io_put_kbuf(req, 0));
e3f721e6 3342 }
def596e9 3343
f5ed3bcd
PB
3344 if (unlikely(!nr_events))
3345 return 0;
3346
3347 io_commit_cqring(ctx);
3348 io_cqring_ev_posted_iopoll(ctx);
1cce17ac 3349 pos = start ? start->next : ctx->iopoll_list.first;
5eef4e87 3350 wq_list_cut(&ctx->iopoll_list, prev, start);
1cce17ac 3351 io_free_batch_list(ctx, pos);
5ba3c874 3352 return nr_events;
def596e9
JA
3353}
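
/*
 * A minimal userspace sketch of the other side of io_do_iopoll(), assuming
 * liburing and an O_DIRECT-capable file at /tmp/testfile (both are
 * assumptions of this sketch, and error handling is omitted): with
 * IORING_SETUP_IOPOLL, the wait below is what drives the completion polling
 * loop above instead of waiting for an interrupt-driven completion.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <liburing.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd;

	io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
	fd = open("/tmp/testfile", O_RDONLY | O_DIRECT);
	posix_memalign(&buf, 4096, 4096);

	io_uring_prep_read(io_uring_get_sqe(&ring), fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/* with IOPOLL this enters the kernel's poll loop rather than sleeping on IRQs */
	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	close(fd);
	free(buf);
	return 0;
}
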
3354
def596e9
JA
3355/*
3356 * We can't just wait for polled events to come to us, we have to actively
3357 * find and complete them.
3358 */
c072481d 3359static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
def596e9
JA
3360{
3361 if (!(ctx->flags & IORING_SETUP_IOPOLL))
3362 return;
3363
3364 mutex_lock(&ctx->uring_lock);
5eef4e87 3365 while (!wq_list_empty(&ctx->iopoll_list)) {
b2edc0a7 3366 /* let it sleep and repeat later if can't complete a request */
5ba3c874 3367 if (io_do_iopoll(ctx, true) == 0)
b2edc0a7 3368 break;
08f5439f
JA
3369 /*
 3370 * Ensure we allow local-to-the-cpu processing to take place;
 3371 * in this case we need to ensure that we reap all events.
 3fcee5a6 3372 * Also let task_work, etc. progress by releasing the mutex
08f5439f 3373 */
3fcee5a6
PB
3374 if (need_resched()) {
3375 mutex_unlock(&ctx->uring_lock);
3376 cond_resched();
3377 mutex_lock(&ctx->uring_lock);
3378 }
def596e9
JA
3379 }
3380 mutex_unlock(&ctx->uring_lock);
3381}
3382
7668b92a 3383static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
def596e9 3384{
7668b92a 3385 unsigned int nr_events = 0;
e9979b36 3386 int ret = 0;
155bc950 3387 unsigned long check_cq;
500f9fba 3388
f39c8a5b
PB
3389 /*
3390 * Don't enter poll loop if we already have events pending.
3391 * If we do, we can potentially be spinning for commands that
3392 * already triggered a CQE (eg in error).
3393 */
155bc950
DY
3394 check_cq = READ_ONCE(ctx->check_cq);
3395 if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
f39c8a5b
PB
3396 __io_cqring_overflow_flush(ctx, false);
3397 if (io_cqring_events(ctx))
d487b43c 3398 return 0;
155bc950
DY
3399
3400 /*
3401 * Similarly do not spin if we have not informed the user of any
3402 * dropped CQE.
3403 */
3404 if (unlikely(check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)))
3405 return -EBADR;
3406
def596e9 3407 do {
500f9fba
JA
3408 /*
3409 * If a submit got punted to a workqueue, we can have the
3410 * application entering polling for a command before it gets
3411 * issued. That app will hold the uring_lock for the duration
3412 * of the poll right here, so we need to take a breather every
3413 * now and then to ensure that the issue has a chance to add
3414 * the poll to the issued list. Otherwise we can spin here
3415 * forever, while the workqueue is stuck trying to acquire the
3416 * very same mutex.
3417 */
5eef4e87 3418 if (wq_list_empty(&ctx->iopoll_list)) {
8f487ef2
PB
3419 u32 tail = ctx->cached_cq_tail;
3420
500f9fba 3421 mutex_unlock(&ctx->uring_lock);
4c6e277c 3422 io_run_task_work();
500f9fba 3423 mutex_lock(&ctx->uring_lock);
def596e9 3424
8f487ef2
PB
3425 /* some requests don't go through iopoll_list */
3426 if (tail != ctx->cached_cq_tail ||
5eef4e87 3427 wq_list_empty(&ctx->iopoll_list))
e9979b36 3428 break;
500f9fba 3429 }
5ba3c874
PB
3430 ret = io_do_iopoll(ctx, !min);
3431 if (ret < 0)
3432 break;
3433 nr_events += ret;
3434 ret = 0;
3435 } while (nr_events < min && !need_resched());
d487b43c 3436
def596e9
JA
3437 return ret;
3438}
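/*
 * Illustrative userspace sketch (not part of this file; the liburing calls
 * and the 4K alignment are assumptions): io_iopoll_check() above is what a
 * wait on an IORING_SETUP_IOPOLL ring ends up spinning in.  Roughly:
 *
 *	#define _GNU_SOURCE
 *	#include <liburing.h>
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		struct io_uring ring;
 *		struct io_uring_sqe *sqe;
 *		struct io_uring_cqe *cqe;
 *		void *buf;
 *		int fd;
 *
 *		io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
 *		fd = open("data.bin", O_RDONLY | O_DIRECT);
 *		posix_memalign(&buf, 4096, 4096);
 *
 *		sqe = io_uring_get_sqe(&ring);
 *		io_uring_prep_read(sqe, fd, buf, 4096, 0);
 *		io_uring_submit(&ring);
 *
 *		// the wait below ends up in io_iopoll_check(), actively
 *		// reaping completions off ctx->iopoll_list
 *		io_uring_wait_cqe(&ring, &cqe);
 *		io_uring_cqe_seen(&ring, cqe);
 *		io_uring_queue_exit(&ring);
 *		return 0;
 *	}
 *
 * Error handling is omitted; IOPOLL needs O_DIRECT on a file whose f_op
 * provides ->iopoll, matching the checks in io_rw_init_file() below.
 */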
3439
491381ce 3440static void kiocb_end_write(struct io_kiocb *req)
2b188cc1 3441{
491381ce
JA
3442 /*
3443 * Tell lockdep we inherited freeze protection from submission
3444 * thread.
3445 */
3446 if (req->flags & REQ_F_ISREG) {
1c98679d 3447 struct super_block *sb = file_inode(req->file)->i_sb;
2b188cc1 3448
1c98679d
PB
3449 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
3450 sb_end_write(sb);
2b188cc1
JA
3451 }
3452}
3453
b63534c4 3454#ifdef CONFIG_BLOCK
dc2a6e9a 3455static bool io_resubmit_prep(struct io_kiocb *req)
b63534c4 3456{
ab454438 3457 struct io_async_rw *rw = req->async_data;
b63534c4 3458
d886e185 3459 if (!req_has_async_data(req))
ab454438 3460 return !io_req_prep_async(req);
538941e2 3461 iov_iter_restore(&rw->s.iter, &rw->s.iter_state);
ab454438 3462 return true;
b63534c4 3463}
b63534c4 3464
3e6a0d3c 3465static bool io_rw_should_reissue(struct io_kiocb *req)
b63534c4 3466{
355afaeb 3467 umode_t mode = file_inode(req->file)->i_mode;
3e6a0d3c 3468 struct io_ring_ctx *ctx = req->ctx;
b63534c4 3469
355afaeb
JA
3470 if (!S_ISBLK(mode) && !S_ISREG(mode))
3471 return false;
3e6a0d3c
JA
3472 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
3473 !(ctx->flags & IORING_SETUP_IOPOLL)))
b63534c4 3474 return false;
7c977a58
JA
3475 /*
3476 * If ref is dying, we might be running poll reap from the exit work.
3477 * Don't attempt to reissue from that path, just let it fail with
3478 * -EAGAIN.
3479 */
3e6a0d3c
JA
3480 if (percpu_ref_is_dying(&ctx->refs))
3481 return false;
ef046888
JA
3482 /*
3483 * Play it safe and assume it's not safe to re-import and reissue if
3484 * we're not in the original thread group (or not in task context).
3485 */
3486 if (!same_thread_group(req->task, current) || !in_task())
3487 return false;
3e6a0d3c
JA
3488 return true;
3489}
e82ad485 3490#else
a1ff1e3f 3491static bool io_resubmit_prep(struct io_kiocb *req)
e82ad485
JA
3492{
3493 return false;
3494}
e82ad485 3495static bool io_rw_should_reissue(struct io_kiocb *req)
3e6a0d3c 3496{
b63534c4
JA
3497 return false;
3498}
3e6a0d3c 3499#endif
b63534c4 3500
8ef12efe 3501static bool __io_complete_rw_common(struct io_kiocb *req, long res)
a1d7c393 3502{
f63cf519 3503 if (req->rw.kiocb.ki_flags & IOCB_WRITE) {
b65c128f 3504 kiocb_end_write(req);
f63cf519
JA
3505 fsnotify_modify(req->file);
3506 } else {
3507 fsnotify_access(req->file);
3508 }
cef216fc 3509 if (unlikely(res != req->cqe.res)) {
9532b99b
PB
3510 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
3511 io_rw_should_reissue(req)) {
3512 req->flags |= REQ_F_REISSUE;
8ef12efe 3513 return true;
9532b99b 3514 }
93d2bcd2 3515 req_set_fail(req);
cef216fc 3516 req->cqe.res = res;
9532b99b 3517 }
8ef12efe
JA
3518 return false;
3519}
3520
cc8e9ba7 3521static inline void io_req_task_complete(struct io_kiocb *req, bool *locked)
8ef12efe 3522{
cef216fc 3523 int res = req->cqe.res;
126180b9
PB
3524
3525 if (*locked) {
cc3cec83 3526 io_req_complete_state(req, res, io_put_kbuf(req, 0));
fff4e40e 3527 io_req_add_compl_list(req);
126180b9 3528 } else {
cc3cec83
JA
3529 io_req_complete_post(req, res,
3530 io_put_kbuf(req, IO_URING_F_UNLOCKED));
126180b9 3531 }
8ef12efe
JA
3532}
3533
00f6e68b 3534static void __io_complete_rw(struct io_kiocb *req, long res,
8ef12efe
JA
3535 unsigned int issue_flags)
3536{
3537 if (__io_complete_rw_common(req, res))
3538 return;
cef216fc 3539 __io_req_complete(req, issue_flags, req->cqe.res,
cc3cec83 3540 io_put_kbuf(req, issue_flags));
ba816ad6
JA
3541}
3542
6b19b766 3543static void io_complete_rw(struct kiocb *kiocb, long res)
ba816ad6 3544{
9adbd45d 3545 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
ba816ad6 3546
8ef12efe
JA
3547 if (__io_complete_rw_common(req, res))
3548 return;
cef216fc 3549 req->cqe.res = res;
8ef12efe 3550 req->io_task_work.func = io_req_task_complete;
3fe07bcd 3551 io_req_task_prio_work_add(req);
2b188cc1
JA
3552}
3553
6b19b766 3554static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
def596e9 3555{
9adbd45d 3556 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
def596e9 3557
491381ce
JA
3558 if (kiocb->ki_flags & IOCB_WRITE)
3559 kiocb_end_write(req);
cef216fc 3560 if (unlikely(res != req->cqe.res)) {
b66ceaf3
PB
3561 if (res == -EAGAIN && io_rw_should_reissue(req)) {
3562 req->flags |= REQ_F_REISSUE;
3563 return;
9532b99b 3564 }
cef216fc 3565 req->cqe.res = res;
8c130827 3566 }
bbde017a 3567
b3fa03fd
PB
3568 /* order with io_iopoll_complete() checking ->iopoll_completed */
3569 smp_store_release(&req->iopoll_completed, 1);
def596e9
JA
3570}
3571
3572/*
3573 * After the iocb has been issued, it's safe to be found on the poll list.
3574 * Adding the kiocb to the list AFTER submission ensures that we don't
f39c8a5b 3575 * find it from an io_do_iopoll() thread before the issuer is done
def596e9
JA
3576 * accessing the kiocb cookie.
3577 */
9882131c 3578static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
def596e9
JA
3579{
3580 struct io_ring_ctx *ctx = req->ctx;
3b44b371 3581 const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
cb3d8972
PB
3582
3583 /* workqueue context doesn't hold uring_lock, grab it now */
3b44b371 3584 if (unlikely(needs_lock))
cb3d8972 3585 mutex_lock(&ctx->uring_lock);
def596e9
JA
3586
3587 /*
3588 * Track whether we have multiple files in our lists. This will impact
3589 * how we do polling eventually, not spinning if we're on potentially
3590 * different devices.
3591 */
5eef4e87 3592 if (wq_list_empty(&ctx->iopoll_list)) {
915b3dde
HX
3593 ctx->poll_multi_queue = false;
3594 } else if (!ctx->poll_multi_queue) {
def596e9
JA
3595 struct io_kiocb *list_req;
3596
5eef4e87
PB
3597 list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
3598 comp_list);
30da1b45 3599 if (list_req->file != req->file)
915b3dde 3600 ctx->poll_multi_queue = true;
def596e9
JA
3601 }
3602
3603 /*
3604 * For fast devices, IO may have already completed. If it has, add
3605 * it to the front so we find it first.
3606 */
65a6543d 3607 if (READ_ONCE(req->iopoll_completed))
5eef4e87 3608 wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
def596e9 3609 else
5eef4e87 3610 wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
bdcd3eab 3611
3b44b371 3612 if (unlikely(needs_lock)) {
cb3d8972
PB
3613 /*
3614 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in the
3615 * sq thread's task context or in an io worker's task context. If the
3616 * current task context is the sq thread, we don't need to check
3617 * whether we should wake up the sq thread.
3618 */
3619 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
3620 wq_has_sleeper(&ctx->sq_data->wait))
3621 wake_up(&ctx->sq_data->wait);
3622
3623 mutex_unlock(&ctx->uring_lock);
3624 }
def596e9
JA
3625}
3626
4503b767
JA
3627static bool io_bdev_nowait(struct block_device *bdev)
3628{
9ba0d0c8 3629 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
4503b767
JA
3630}
3631
2b188cc1
JA
3632/*
3633 * If we tracked the file through the SCM inflight mechanism, we could support
3634 * any file. For now, just ensure that anything potentially problematic is done
3635 * inline.
3636 */
88459b50 3637static bool __io_file_supports_nowait(struct file *file, umode_t mode)
2b188cc1 3638{
4503b767 3639 if (S_ISBLK(mode)) {
4e7b5671
CH
3640 if (IS_ENABLED(CONFIG_BLOCK) &&
3641 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
4503b767
JA
3642 return true;
3643 return false;
3644 }
976517f1 3645 if (S_ISSOCK(mode))
2b188cc1 3646 return true;
4503b767 3647 if (S_ISREG(mode)) {
4e7b5671
CH
3648 if (IS_ENABLED(CONFIG_BLOCK) &&
3649 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
4503b767
JA
3650 file->f_op != &io_uring_fops)
3651 return true;
3652 return false;
3653 }
2b188cc1 3654
c5b85625
JA
3655 /* any ->read/write should understand O_NONBLOCK */
3656 if (file->f_flags & O_NONBLOCK)
3657 return true;
35645ac3 3658 return file->f_mode & FMODE_NOWAIT;
2b188cc1 3659}
c5b85625 3660
88459b50
PB
3661/*
3662 * Work out the FFS_* flags for a file: whether it is a regular file,
3663 * whether it supports nowait issue, and whether it needs SCM inflight
3664 * accounting.
3665 */
3666static unsigned int io_file_get_flags(struct file *file)
3667{
3668 umode_t mode = file_inode(file)->i_mode;
3669 unsigned int res = 0;
af197f50 3670
88459b50
PB
3671 if (S_ISREG(mode))
3672 res |= FFS_ISREG;
3673 if (__io_file_supports_nowait(file, mode))
3674 res |= FFS_NOWAIT;
5e45690a
JA
3675 if (io_file_need_scm(file))
3676 res |= FFS_SCM;
88459b50 3677 return res;
2b188cc1
JA
3678}
3679
35645ac3 3680static inline bool io_file_supports_nowait(struct io_kiocb *req)
7b29f92d 3681{
88459b50 3682 return req->flags & REQ_F_SUPPORT_NOWAIT;
7b29f92d
JA
3683}
3684
b9a6b8f9 3685static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2b188cc1 3686{
9adbd45d 3687 struct kiocb *kiocb = &req->rw.kiocb;
09bb8394
JA
3688 unsigned ioprio;
3689 int ret;
2b188cc1 3690
2b188cc1 3691 kiocb->ki_pos = READ_ONCE(sqe->off);
05b538c1
PB
3692 /* used for fixed read/write too - just read unconditionally */
3693 req->buf_index = READ_ONCE(sqe->buf_index);
3694
3695 if (req->opcode == IORING_OP_READ_FIXED ||
3696 req->opcode == IORING_OP_WRITE_FIXED) {
3697 struct io_ring_ctx *ctx = req->ctx;
3698 u16 index;
3699
3700 if (unlikely(req->buf_index >= ctx->nr_user_bufs))
3701 return -EFAULT;
3702 index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
3703 req->imu = ctx->user_bufs[index];
3704 io_req_set_rsrc_node(req, ctx, 0);
3705 }
9adbd45d 3706
fb27274a
PB
3707 ioprio = READ_ONCE(sqe->ioprio);
3708 if (ioprio) {
3709 ret = ioprio_check_cap(ioprio);
3710 if (ret)
3711 return ret;
3712
3713 kiocb->ki_ioprio = ioprio;
3714 } else {
3715 kiocb->ki_ioprio = get_current_ioprio();
eae071c9
PB
3716 }
3717
3529d8c2
JA
3718 req->rw.addr = READ_ONCE(sqe->addr);
3719 req->rw.len = READ_ONCE(sqe->len);
584b0180 3720 req->rw.flags = READ_ONCE(sqe->rw_flags);
2b188cc1 3721 return 0;
2b188cc1
JA
3722}
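/*
 * Illustrative sketch (not part of this file; the liburing helpers, and the
 * ring/fd/4K buf it operates on, are assumptions): io_prep_rw() consumes
 * sqe->off, sqe->buf_index, sqe->ioprio, sqe->addr, sqe->len and
 * sqe->rw_flags.  A fixed-buffer read that takes the IORING_OP_READ_FIXED
 * branch above could be queued roughly like this, assuming buffer index 0
 * was registered beforehand:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *
 *	io_uring_register_buffers(&ring, &iov, 1);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read_fixed(sqe, fd, buf, 4096, 0, 0);
 *	io_uring_submit(&ring);
 */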
3723
3724static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
3725{
3726 switch (ret) {
3727 case -EIOCBQUEUED:
3728 break;
3729 case -ERESTARTSYS:
3730 case -ERESTARTNOINTR:
3731 case -ERESTARTNOHAND:
3732 case -ERESTART_RESTARTBLOCK:
3733 /*
3734 * We can't just restart the syscall, since previously
3735 * submitted sqes may already be in progress. Just fail this
3736 * IO with EINTR.
3737 */
3738 ret = -EINTR;
df561f66 3739 fallthrough;
2b188cc1 3740 default:
6b19b766 3741 kiocb->ki_complete(kiocb, ret);
2b188cc1
JA
3742 }
3743}
3744
b4aec400 3745static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
d34e1e5b
DY
3746{
3747 struct kiocb *kiocb = &req->rw.kiocb;
3748
6f83ab22
JA
3749 if (kiocb->ki_pos != -1)
3750 return &kiocb->ki_pos;
3751
3752 if (!(req->file->f_mode & FMODE_STREAM)) {
3753 req->flags |= REQ_F_CUR_POS;
3754 kiocb->ki_pos = req->file->f_pos;
3755 return &kiocb->ki_pos;
d34e1e5b 3756 }
6f83ab22
JA
3757
3758 kiocb->ki_pos = 0;
3759 return NULL;
d34e1e5b
DY
3760}
3761
2ea537ca 3762static void kiocb_done(struct io_kiocb *req, ssize_t ret,
889fca73 3763 unsigned int issue_flags)
ba816ad6 3764{
e8c2bc1f 3765 struct io_async_rw *io = req->async_data;
ba04291e 3766
227c0c96 3767 /* add previously done IO, if any */
d886e185 3768 if (req_has_async_data(req) && io->bytes_done > 0) {
227c0c96 3769 if (ret < 0)
e8c2bc1f 3770 ret = io->bytes_done;
227c0c96 3771 else
e8c2bc1f 3772 ret += io->bytes_done;
227c0c96
JA
3773 }
3774
ba04291e 3775 if (req->flags & REQ_F_CUR_POS)
2ea537ca
PB
3776 req->file->f_pos = req->rw.kiocb.ki_pos;
3777 if (ret >= 0 && (req->rw.kiocb.ki_complete == io_complete_rw))
00f6e68b 3778 __io_complete_rw(req, ret, issue_flags);
ba816ad6 3779 else
2ea537ca 3780 io_rw_done(&req->rw.kiocb, ret);
97284637 3781
b66ceaf3 3782 if (req->flags & REQ_F_REISSUE) {
97284637 3783 req->flags &= ~REQ_F_REISSUE;
b91ef187 3784 if (io_resubmit_prep(req))
773af691 3785 io_req_task_queue_reissue(req);
b91ef187
PB
3786 else
3787 io_req_task_queue_fail(req, ret);
97284637 3788 }
ba816ad6
JA
3789}
3790
eae071c9
PB
3791static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
3792 struct io_mapped_ubuf *imu)
edafccee 3793{
9adbd45d 3794 size_t len = req->rw.len;
75769e3f 3795 u64 buf_end, buf_addr = req->rw.addr;
edafccee 3796 size_t offset;
edafccee 3797
75769e3f 3798 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
edafccee
JA
3799 return -EFAULT;
3800 /* not inside the mapped region */
4751f53d 3801 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
edafccee
JA
3802 return -EFAULT;
3803
3804 /*
3805 * May not be a start of buffer, set size appropriately
3806 * and advance us to the beginning.
3807 */
3808 offset = buf_addr - imu->ubuf;
3809 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bd11b3a3
JA
3810
3811 if (offset) {
3812 /*
3813 * Don't use iov_iter_advance() here, as it's really slow when
3814 * using the latter parts of a big fixed buffer - it iterates
3815 * over each segment manually. We can cheat a bit here, because
3816 * we know that:
3817 *
3818 * 1) it's a BVEC iter, we set it up
3819 * 2) all bvecs are PAGE_SIZE in size, except potentially the
3820 * first and last bvec
3821 *
3822 * So just find our index, and adjust the iterator afterwards.
3823 * If the offset is within the first bvec (or the whole first
3824 * bvec), just use iov_iter_advance(). This makes it easier
3825 * since we can just skip the first segment, which may not
3826 * be PAGE_SIZE aligned.
3827 */
3828 const struct bio_vec *bvec = imu->bvec;
3829
3830 if (offset <= bvec->bv_len) {
3831 iov_iter_advance(iter, offset);
3832 } else {
3833 unsigned long seg_skip;
3834
3835 /* skip first vec */
3836 offset -= bvec->bv_len;
3837 seg_skip = 1 + (offset >> PAGE_SHIFT);
3838
3839 iter->bvec = bvec + seg_skip;
3840 iter->nr_segs -= seg_skip;
99c79f66 3841 iter->count -= bvec->bv_len + offset;
bd11b3a3 3842 iter->iov_offset = offset & ~PAGE_MASK;
bd11b3a3
JA
3843 }
3844 }
3845
847595de 3846 return 0;
edafccee
JA
3847}
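/*
 * Worked example for the bvec math above (illustrative, assuming a
 * page-aligned registered buffer with 4K pages, so every bvec including
 * the first has bv_len == 4096):
 *
 *	buf_addr = imu->ubuf + 10000, len = 100
 *	iov_iter_bvec() starts with count = offset + len = 10100
 *	offset = 10000 > bvec[0].bv_len, so the "else" branch runs:
 *		offset  -= 4096              -> 5904
 *		seg_skip = 1 + (5904 >> 12)  -> 2
 *		iter->bvec    += 2           (now points at bvec[2])
 *		iter->nr_segs -= 2
 *		iter->count   -= 4096 + 5904 -> 100 (just the requested len)
 *		iter->iov_offset = 5904 & ~PAGE_MASK -> 1808
 *
 * bvec[2] covers buffer bytes [8192, 12288), and 8192 + 1808 == 10000,
 * i.e. the iterator lands exactly on the requested address.
 */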
3848
5106dd6e
JA
3849static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
3850 unsigned int issue_flags)
eae071c9 3851{
05b538c1
PB
3852 if (WARN_ON_ONCE(!req->imu))
3853 return -EFAULT;
3854 return __io_import_fixed(req, rw, iter, req->imu);
eae071c9
PB
3855}
3856
9cfc7e94
JA
3857static int io_buffer_add_list(struct io_ring_ctx *ctx,
3858 struct io_buffer_list *bl, unsigned int bgid)
bcda7baa 3859{
dbc7d452 3860 bl->bgid = bgid;
9cfc7e94
JA
3861 if (bgid < BGID_ARRAY)
3862 return 0;
3863
3864 return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
dbc7d452
JA
3865}
3866
149c69b0 3867static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
984824db 3868 struct io_buffer_list *bl)
bcda7baa 3869{
e7637a49
JA
3870 if (!list_empty(&bl->buf_list)) {
3871 struct io_buffer *kbuf;
bcda7baa 3872
dbc7d452
JA
3873 kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
3874 list_del(&kbuf->list);
bcda7baa
JA
3875 if (*len > kbuf->len)
3876 *len = kbuf->len;
30d51dd4
PB
3877 req->flags |= REQ_F_BUFFER_SELECTED;
3878 req->kbuf = kbuf;
e7637a49 3879 req->buf_index = kbuf->bid;
984824db 3880 return u64_to_user_ptr(kbuf->addr);
e7637a49 3881 }
984824db 3882 return NULL;
149c69b0
JA
3883}
3884
c7fb1942
JA
3885static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
3886 struct io_buffer_list *bl,
3887 unsigned int issue_flags)
3888{
3889 struct io_uring_buf_ring *br = bl->buf_ring;
3890 struct io_uring_buf *buf;
c6e9fa5c 3891 __u16 head = bl->head;
c7fb1942 3892
fc9375e3 3893 if (unlikely(smp_load_acquire(&br->tail) == head))
984824db 3894 return NULL;
c7fb1942
JA
3895
3896 head &= bl->mask;
3897 if (head < IO_BUFFER_LIST_BUF_PER_PAGE) {
3898 buf = &br->bufs[head];
bcda7baa 3899 } else {
c7fb1942 3900 int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
97da4a53 3901 int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
c7fb1942
JA
3902 buf = page_address(bl->buf_pages[index]);
3903 buf += off;
bcda7baa 3904 }
c7fb1942
JA
3905 if (*len > buf->len)
3906 *len = buf->len;
3907 req->flags |= REQ_F_BUFFER_RING;
3908 req->buf_list = bl;
3909 req->buf_index = buf->bid;
bcda7baa 3910
984824db
CH
3911 if (issue_flags & IO_URING_F_UNLOCKED) {
3912 /*
3913 * If we came in unlocked, we have no choice but to consume the
3914 * buffer here. This does mean it'll be pinned until the IO
3915 * completes. But coming in unlocked means we're in io-wq
3916 * context, hence there should be no further retry. For the
3917 * locked case, the caller must ensure to call the commit when
3918 * the transfer completes (or if we get -EAGAIN and must poll
3919 * or retry).
3920 */
3921 req->buf_list = NULL;
3922 bl->head++;
3923 }
c7fb1942 3924 return u64_to_user_ptr(buf->addr);
bcda7baa
JA
3925}
3926
c54d52c2 3927static void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
4e906702 3928 unsigned int issue_flags)
4d954c25 3929{
dbc7d452
JA
3930 struct io_ring_ctx *ctx = req->ctx;
3931 struct io_buffer_list *bl;
984824db 3932 void __user *ret = NULL;
bcda7baa 3933
f8929630 3934 io_ring_submit_lock(req->ctx, issue_flags);
4d954c25 3935
4e906702 3936 bl = io_buffer_get_list(ctx, req->buf_index);
984824db
CH
3937 if (likely(bl)) {
3938 if (bl->buf_nr_pages)
3939 ret = io_ring_buffer_select(req, len, bl, issue_flags);
3940 else
3941 ret = io_provided_buffer_select(req, len, bl);
bcda7baa 3942 }
984824db
CH
3943 io_ring_submit_unlock(req->ctx, issue_flags);
3944 return ret;
4d954c25
JA
3945}
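/*
 * Illustrative sketch (not part of this file; the liburing helpers are
 * assumptions, and bufs is assumed to point at 8 * 4096 contiguous bytes):
 * the classic provided-buffer path above (io_provided_buffer_select()) is
 * fed with IORING_OP_PROVIDE_BUFFERS and consumed via IOSQE_BUFFER_SELECT.
 * Roughly, with group id 1 and error handling omitted:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_provide_buffers(sqe, bufs, 4096, 8, 1, 0);
 *	io_uring_submit(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, NULL, 4096, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 1;
 *	io_uring_submit(&ring);
 *
 * The id of the buffer that got picked comes back in cqe->flags, shifted
 * by IORING_CQE_BUFFER_SHIFT.
 */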
3946
3947#ifdef CONFIG_COMPAT
3948static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
51aac424 3949 unsigned int issue_flags)
4d954c25
JA
3950{
3951 struct compat_iovec __user *uiov;
3952 compat_ssize_t clen;
3953 void __user *buf;
e5b00349 3954 size_t len;
4d954c25
JA
3955
3956 uiov = u64_to_user_ptr(req->rw.addr);
3957 if (!access_ok(uiov, sizeof(*uiov)))
3958 return -EFAULT;
3959 if (__get_user(clen, &uiov->iov_len))
3960 return -EFAULT;
3961 if (clen < 0)
3962 return -EINVAL;
3963
3964 len = clen;
4e906702 3965 buf = io_buffer_select(req, &len, issue_flags);
984824db
CH
3966 if (!buf)
3967 return -ENOBUFS;
b66e65f4 3968 req->rw.addr = (unsigned long) buf;
4d954c25 3969 iov[0].iov_base = buf;
b66e65f4 3970 req->rw.len = iov[0].iov_len = (compat_size_t) len;
4d954c25
JA
3971 return 0;
3972}
3973#endif
3974
3975static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
51aac424 3976 unsigned int issue_flags)
4d954c25
JA
3977{
3978 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3979 void __user *buf;
3980 ssize_t len;
3981
3982 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3983 return -EFAULT;
3984
3985 len = iov[0].iov_len;
3986 if (len < 0)
3987 return -EINVAL;
4e906702 3988 buf = io_buffer_select(req, &len, issue_flags);
984824db
CH
3989 if (!buf)
3990 return -ENOBUFS;
b66e65f4 3991 req->rw.addr = (unsigned long) buf;
4d954c25 3992 iov[0].iov_base = buf;
b66e65f4 3993 req->rw.len = iov[0].iov_len = len;
4d954c25
JA
3994 return 0;
3995}
3996
3997static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
51aac424 3998 unsigned int issue_flags)
4d954c25 3999{
c7fb1942 4000 if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
b66e65f4
JA
4001 iov[0].iov_base = u64_to_user_ptr(req->rw.addr);
4002 iov[0].iov_len = req->rw.len;
4d954c25 4003 return 0;
dddb3e26 4004 }
dd201662 4005 if (req->rw.len != 1)
4d954c25
JA
4006 return -EINVAL;
4007
4008#ifdef CONFIG_COMPAT
4009 if (req->ctx->compat)
51aac424 4010 return io_compat_import(req, iov, issue_flags);
4d954c25
JA
4011#endif
4012
51aac424 4013 return __io_iov_buffer_select(req, iov, issue_flags);
4d954c25
JA
4014}
4015
b66e65f4
JA
4016static inline bool io_do_buffer_select(struct io_kiocb *req)
4017{
4018 if (!(req->flags & REQ_F_BUFFER_SELECT))
4019 return false;
c7fb1942 4020 return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
b66e65f4
JA
4021}
4022
caa8fe6e
PB
4023static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
4024 struct io_rw_state *s,
4025 unsigned int issue_flags)
2b188cc1 4026{
5e49c973 4027 struct iov_iter *iter = &s->iter;
847595de 4028 u8 opcode = req->opcode;
caa8fe6e 4029 struct iovec *iovec;
d1d681b0
PB
4030 void __user *buf;
4031 size_t sqe_len;
4d954c25 4032 ssize_t ret;
edafccee 4033
f3251183 4034 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
5106dd6e 4035 ret = io_import_fixed(req, rw, iter, issue_flags);
f3251183
PB
4036 if (ret)
4037 return ERR_PTR(ret);
4038 return NULL;
4039 }
2b188cc1 4040
d1d681b0
PB
4041 buf = u64_to_user_ptr(req->rw.addr);
4042 sqe_len = req->rw.len;
9adbd45d 4043
3a6820f2 4044 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
b66e65f4 4045 if (io_do_buffer_select(req)) {
4e906702 4046 buf = io_buffer_select(req, &sqe_len, issue_flags);
984824db
CH
4047 if (!buf)
4048 return ERR_PTR(-ENOBUFS);
b66e65f4 4049 req->rw.addr = (unsigned long) buf;
3f9d6441 4050 req->rw.len = sqe_len;
bcda7baa
JA
4051 }
4052
5e49c973 4053 ret = import_single_range(rw, buf, sqe_len, s->fast_iov, iter);
f3251183
PB
4054 if (ret)
4055 return ERR_PTR(ret);
4056 return NULL;
3a6820f2
JA
4057 }
4058
caa8fe6e 4059 iovec = s->fast_iov;
4d954c25 4060 if (req->flags & REQ_F_BUFFER_SELECT) {
caa8fe6e 4061 ret = io_iov_buffer_select(req, iovec, issue_flags);
f3251183
PB
4062 if (ret)
4063 return ERR_PTR(ret);
4064 iov_iter_init(iter, rw, iovec, 1, iovec->iov_len);
4065 return NULL;
4d954c25
JA
4066 }
4067
caa8fe6e 4068 ret = __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
89cd35c5 4069 req->ctx->compat);
caa8fe6e
PB
4070 if (unlikely(ret < 0))
4071 return ERR_PTR(ret);
4072 return iovec;
2b188cc1
JA
4073}
4074
5e49c973
PB
4075static inline int io_import_iovec(int rw, struct io_kiocb *req,
4076 struct iovec **iovec, struct io_rw_state *s,
4077 unsigned int issue_flags)
4078{
caa8fe6e
PB
4079 *iovec = __io_import_iovec(rw, req, s, issue_flags);
4080 if (unlikely(IS_ERR(*iovec)))
4081 return PTR_ERR(*iovec);
5e49c973 4082
5e49c973 4083 iov_iter_save_state(&s->iter, &s->iter_state);
caa8fe6e 4084 return 0;
2b188cc1
JA
4085}
4086
0fef9483
JA
4087static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
4088{
5b09e37e 4089 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
0fef9483
JA
4090}
4091
31b51510 4092/*
32960613
JA
4093 * For files that don't have ->read_iter() and ->write_iter(), handle them
4094 * by looping over ->read() or ->write() manually.
31b51510 4095 */
4017eb91 4096static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
32960613 4097{
4017eb91
JA
4098 struct kiocb *kiocb = &req->rw.kiocb;
4099 struct file *file = req->file;
32960613 4100 ssize_t ret = 0;
af9c45ec 4101 loff_t *ppos;
32960613
JA
4102
4103 /*
4104 * Don't support polled IO through this interface, and we can't
4105 * support non-blocking either. For the latter, this just causes
4106 * the kiocb to be handled from an async context.
4107 */
4108 if (kiocb->ki_flags & IOCB_HIPRI)
4109 return -EOPNOTSUPP;
35645ac3
PB
4110 if ((kiocb->ki_flags & IOCB_NOWAIT) &&
4111 !(kiocb->ki_filp->f_flags & O_NONBLOCK))
32960613
JA
4112 return -EAGAIN;
4113
af9c45ec
DY
4114 ppos = io_kiocb_ppos(kiocb);
4115
32960613 4116 while (iov_iter_count(iter)) {
311ae9e1 4117 struct iovec iovec;
32960613
JA
4118 ssize_t nr;
4119
311ae9e1
PB
4120 if (!iov_iter_is_bvec(iter)) {
4121 iovec = iov_iter_iovec(iter);
4122 } else {
4017eb91
JA
4123 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
4124 iovec.iov_len = req->rw.len;
311ae9e1
PB
4125 }
4126
32960613
JA
4127 if (rw == READ) {
4128 nr = file->f_op->read(file, iovec.iov_base,
af9c45ec 4129 iovec.iov_len, ppos);
32960613
JA
4130 } else {
4131 nr = file->f_op->write(file, iovec.iov_base,
af9c45ec 4132 iovec.iov_len, ppos);
32960613
JA
4133 }
4134
4135 if (nr < 0) {
4136 if (!ret)
4137 ret = nr;
4138 break;
4139 }
5e929367 4140 ret += nr;
16c8d2df
JA
4141 if (!iov_iter_is_bvec(iter)) {
4142 iov_iter_advance(iter, nr);
4143 } else {
16c8d2df 4144 req->rw.addr += nr;
5e929367
JA
4145 req->rw.len -= nr;
4146 if (!req->rw.len)
4147 break;
16c8d2df 4148 }
32960613
JA
4149 if (nr != iovec.iov_len)
4150 break;
32960613
JA
4151 }
4152
4153 return ret;
4154}
4155
ff6165b2
JA
4156static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
4157 const struct iovec *fast_iov, struct iov_iter *iter)
f67676d1 4158{
e8c2bc1f 4159 struct io_async_rw *rw = req->async_data;
b64e3444 4160
538941e2 4161 memcpy(&rw->s.iter, iter, sizeof(*iter));
afb87658 4162 rw->free_iovec = iovec;
227c0c96 4163 rw->bytes_done = 0;
ff6165b2 4164 /* can only be fixed buffers, no need to do anything */
9c3a205c 4165 if (iov_iter_is_bvec(iter))
ff6165b2 4166 return;
b64e3444 4167 if (!iovec) {
ff6165b2
JA
4168 unsigned iov_off = 0;
4169
538941e2 4170 rw->s.iter.iov = rw->s.fast_iov;
ff6165b2
JA
4171 if (iter->iov != fast_iov) {
4172 iov_off = iter->iov - fast_iov;
538941e2 4173 rw->s.iter.iov += iov_off;
ff6165b2 4174 }
538941e2
PB
4175 if (rw->s.fast_iov != fast_iov)
4176 memcpy(rw->s.fast_iov + iov_off, fast_iov + iov_off,
45097dae 4177 sizeof(struct iovec) * iter->nr_segs);
99bc4c38
PB
4178 } else {
4179 req->flags |= REQ_F_NEED_CLEANUP;
f67676d1
JA
4180 }
4181}
4182
8d4af685 4183static inline bool io_alloc_async_data(struct io_kiocb *req)
3d9932a8 4184{
e8c2bc1f
JA
4185 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
4186 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
d886e185
PB
4187 if (req->async_data) {
4188 req->flags |= REQ_F_ASYNC_DATA;
4189 return false;
4190 }
4191 return true;
3d9932a8
XW
4192}
4193
ff6165b2 4194static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
c88598a9 4195 struct io_rw_state *s, bool force)
b7bb4f7d 4196{
26f0505a 4197 if (!force && !io_op_defs[req->opcode].needs_async_setup)
74566df3 4198 return 0;
d886e185 4199 if (!req_has_async_data(req)) {
cd658695
JA
4200 struct io_async_rw *iorw;
4201
6cb78689 4202 if (io_alloc_async_data(req)) {
6bf985dc 4203 kfree(iovec);
5d204bcf 4204 return -ENOMEM;
6bf985dc 4205 }
b7bb4f7d 4206
c88598a9 4207 io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
cd658695
JA
4208 iorw = req->async_data;
4209 /* we've copied and mapped the iter, ensure state is saved */
538941e2 4210 iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
5d204bcf 4211 }
b7bb4f7d 4212 return 0;
f67676d1
JA
4213}
4214
73debe68 4215static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
c3e330a4 4216{
e8c2bc1f 4217 struct io_async_rw *iorw = req->async_data;
5e49c973 4218 struct iovec *iov;
847595de 4219 int ret;
c3e330a4 4220
51aac424 4221 /* submission path, ->uring_lock should already be taken */
3b44b371 4222 ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
c3e330a4
PB
4223 if (unlikely(ret < 0))
4224 return ret;
4225
ab0b196c
PB
4226 iorw->bytes_done = 0;
4227 iorw->free_iovec = iov;
4228 if (iov)
4229 req->flags |= REQ_F_NEED_CLEANUP;
c3e330a4
PB
4230 return 0;
4231}
4232
157dc813
JA
4233static int io_readv_prep_async(struct io_kiocb *req)
4234{
4235 return io_rw_prep_async(req, READ);
4236}
4237
4238static int io_writev_prep_async(struct io_kiocb *req)
4239{
4240 return io_rw_prep_async(req, WRITE);
4241}
4242
c1dd91d1 4243/*
ffdc8dab 4244 * This is our waitqueue callback handler, registered through __folio_lock_async()
c1dd91d1
JA
4245 * when we initially tried to do the IO with the iocb and armed our waitqueue.
4246 * This gets called when the page is unlocked, and we generally expect that to
4247 * happen when the page IO is completed and the page is now uptodate. This will
4248 * queue a task_work based retry of the operation, attempting to copy the data
4249 * again. If the latter fails because the page was NOT uptodate, then we will
4250 * do a thread based blocking retry of the operation. That's the unexpected
4251 * slow path.
4252 */
bcf5a063
JA
4253static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
4254 int sync, void *arg)
4255{
4256 struct wait_page_queue *wpq;
4257 struct io_kiocb *req = wait->private;
bcf5a063 4258 struct wait_page_key *key = arg;
bcf5a063
JA
4259
4260 wpq = container_of(wait, struct wait_page_queue, wait);
4261
cdc8fcb4
LT
4262 if (!wake_page_match(wpq, key))
4263 return 0;
4264
c8d317aa 4265 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
bcf5a063 4266 list_del_init(&wait->entry);
921b9054 4267 io_req_task_queue(req);
bcf5a063
JA
4268 return 1;
4269}
4270
c1dd91d1
JA
4271/*
4272 * This controls whether a given IO request should be armed for async page
4273 * based retry. If we return false here, the request is handed to the async
4274 * worker threads for retry. If we're doing buffered reads on a regular file,
4275 * we prepare a private wait_page_queue entry and retry the operation. This
4276 * will either succeed because the page is now uptodate and unlocked, or it
4277 * will register a callback when the page is unlocked at IO completion. Through
4278 * that callback, io_uring uses task_work to setup a retry of the operation.
4279 * That retry will attempt the buffered read again. The retry will generally
4280 * succeed, or in rare cases where it fails, we then fall back to using the
4281 * async worker threads for a blocking retry.
4282 */
227c0c96 4283static bool io_rw_should_retry(struct io_kiocb *req)
f67676d1 4284{
e8c2bc1f
JA
4285 struct io_async_rw *rw = req->async_data;
4286 struct wait_page_queue *wait = &rw->wpq;
bcf5a063 4287 struct kiocb *kiocb = &req->rw.kiocb;
f67676d1 4288
bcf5a063
JA
4289 /* never retry for NOWAIT, we just complete with -EAGAIN */
4290 if (req->flags & REQ_F_NOWAIT)
4291 return false;
f67676d1 4292
227c0c96 4293 /* Only for buffered IO */
3b2a4439 4294 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
bcf5a063 4295 return false;
3b2a4439 4296
bcf5a063
JA
4297 /*
4298 * just use poll if we can, and don't attempt if the fs doesn't
4299 * support callback based unlocks
4300 */
4301 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
4302 return false;
f67676d1 4303
3b2a4439
JA
4304 wait->wait.func = io_async_buf_func;
4305 wait->wait.private = req;
4306 wait->wait.flags = 0;
4307 INIT_LIST_HEAD(&wait->wait.entry);
4308 kiocb->ki_flags |= IOCB_WAITQ;
c8d317aa 4309 kiocb->ki_flags &= ~IOCB_NOWAIT;
3b2a4439 4310 kiocb->ki_waitq = wait;
3b2a4439 4311 return true;
bcf5a063
JA
4312}
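/*
 * Descriptive summary of the retry machinery documented above (a rough
 * sketch, paraphrasing the comments on io_async_buf_func() and
 * io_rw_should_retry()):
 *
 *	1. When a buffered read can't proceed without blocking,
 *	   io_rw_should_retry() arms kiocb->ki_waitq (IOCB_WAITQ) and
 *	   io_read() retries the read.
 *	2. If the page still isn't uptodate, the retry returns -EIOCBQUEUED
 *	   and the wait entry stays queued on the page's waitqueue.
 *	3. When the page is unlocked, io_async_buf_func() fires, clears
 *	   IOCB_WAITQ and queues task_work via io_req_task_queue().
 *	4. The task_work re-issues the read; in the rare case that still
 *	   fails, the request falls back to a blocking retry in io-wq.
 */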
4313
aeab9506 4314static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
bcf5a063 4315{
607b6fb8 4316 if (likely(req->file->f_op->read_iter))
bcf5a063 4317 return call_read_iter(req->file, &req->rw.kiocb, iter);
2dd2111d 4318 else if (req->file->f_op->read)
4017eb91 4319 return loop_rw_iter(READ, req, iter);
2dd2111d
GH
4320 else
4321 return -EINVAL;
f67676d1
JA
4322}
4323
7db30437
ML
4324static bool need_read_all(struct io_kiocb *req)
4325{
4326 return req->flags & REQ_F_ISREG ||
4327 S_ISBLK(file_inode(req->file)->i_mode);
4328}
4329
584b0180
JA
4330static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
4331{
4332 struct kiocb *kiocb = &req->rw.kiocb;
4333 struct io_ring_ctx *ctx = req->ctx;
4334 struct file *file = req->file;
4335 int ret;
4336
4337 if (unlikely(!file || !(file->f_mode & mode)))
4338 return -EBADF;
4339
4340 if (!io_req_ffs_set(req))
4341 req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
4342
4343 kiocb->ki_flags = iocb_flags(file);
4344 ret = kiocb_set_rw_flags(kiocb, req->rw.flags);
4345 if (unlikely(ret))
4346 return ret;
4347
4348 /*
4349 * If the file is marked O_NONBLOCK, still allow retry for it if it
4350 * supports async. Otherwise it's impossible to use O_NONBLOCK files
4351 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
4352 */
4353 if ((kiocb->ki_flags & IOCB_NOWAIT) ||
4354 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
4355 req->flags |= REQ_F_NOWAIT;
4356
4357 if (ctx->flags & IORING_SETUP_IOPOLL) {
4358 if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
4359 return -EOPNOTSUPP;
4360
32452a3e 4361 kiocb->private = NULL;
584b0180
JA
4362 kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
4363 kiocb->ki_complete = io_complete_rw_iopoll;
4364 req->iopoll_completed = 0;
4365 } else {
4366 if (kiocb->ki_flags & IOCB_HIPRI)
4367 return -EINVAL;
4368 kiocb->ki_complete = io_complete_rw;
4369 }
4370
4371 return 0;
4372}
4373
889fca73 4374static int io_read(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1 4375{
607b6fb8 4376 struct io_rw_state __s, *s = &__s;
c88598a9 4377 struct iovec *iovec;
9adbd45d 4378 struct kiocb *kiocb = &req->rw.kiocb;
45d189c6 4379 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
d886e185 4380 struct io_async_rw *rw;
cd658695 4381 ssize_t ret, ret2;
b4aec400 4382 loff_t *ppos;
ff6165b2 4383
607b6fb8
PB
4384 if (!req_has_async_data(req)) {
4385 ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
4386 if (unlikely(ret < 0))
4387 return ret;
4388 } else {
2be2eb02
JA
4389 /*
4390 * Safe and required to re-import if we're using provided
4391 * buffers, as we dropped the selected one before retry.
4392 */
4393 if (req->flags & REQ_F_BUFFER_SELECT) {
4394 ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
4395 if (unlikely(ret < 0))
4396 return ret;
4397 }
4398
d886e185 4399 rw = req->async_data;
c88598a9 4400 s = &rw->s;
cd658695
JA
4401 /*
4402 * We come here from an earlier attempt, restore our state to
4403 * match in case it doesn't. It's cheap enough that we don't
4404 * need to make this conditional.
4405 */
c88598a9 4406 iov_iter_restore(&s->iter, &s->iter_state);
2846c481 4407 iovec = NULL;
2846c481 4408 }
584b0180 4409 ret = io_rw_init_file(req, FMODE_READ);
323b190b
JA
4410 if (unlikely(ret)) {
4411 kfree(iovec);
584b0180 4412 return ret;
323b190b 4413 }
cef216fc 4414 req->cqe.res = iov_iter_count(&s->iter);
2b188cc1 4415
607b6fb8
PB
4416 if (force_nonblock) {
4417 /* If the file doesn't support async, just async punt */
35645ac3 4418 if (unlikely(!io_file_supports_nowait(req))) {
607b6fb8
PB
4419 ret = io_setup_async_rw(req, iovec, s, true);
4420 return ret ?: -EAGAIN;
4421 }
a88fc400 4422 kiocb->ki_flags |= IOCB_NOWAIT;
607b6fb8
PB
4423 } else {
4424 /* Ensure we clear previously set non-block flag */
4425 kiocb->ki_flags &= ~IOCB_NOWAIT;
6713e7a6 4426 }
9e645e11 4427
b4aec400 4428 ppos = io_kiocb_update_pos(req);
d34e1e5b 4429
cef216fc 4430 ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
5ea5dd45
PB
4431 if (unlikely(ret)) {
4432 kfree(iovec);
4433 return ret;
4434 }
2b188cc1 4435
c88598a9 4436 ret = io_iter_do_read(req, &s->iter);
32960613 4437
230d50d4 4438 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
6ad7f233 4439 req->flags &= ~REQ_F_REISSUE;
9af177ee
JA
4440 /* if we can poll, just do that */
4441 if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
4442 return -EAGAIN;
eefdf30f
JA
4443 /* IOPOLL retry should happen for io-wq threads */
4444 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
f91daf56 4445 goto done;
75c668cd
PB
4446 /* no retry on NONBLOCK nor RWF_NOWAIT */
4447 if (req->flags & REQ_F_NOWAIT)
355afaeb 4448 goto done;
f38c7e3a 4449 ret = 0;
230d50d4
JA
4450 } else if (ret == -EIOCBQUEUED) {
4451 goto out_free;
cef216fc 4452 } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
7db30437 4453 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
7335e3bf 4454 /* read all, failed, already did sync or don't want to retry */
00d23d51 4455 goto done;
227c0c96
JA
4456 }
4457
cd658695
JA
4458 /*
4459 * Don't depend on the iter state matching what was consumed, or being
4460 * untouched in case of error. Restore it and we'll advance it
4461 * manually if we need to.
4462 */
c88598a9 4463 iov_iter_restore(&s->iter, &s->iter_state);
cd658695 4464
c88598a9 4465 ret2 = io_setup_async_rw(req, iovec, s, true);
6bf985dc
PB
4466 if (ret2)
4467 return ret2;
4468
fe1cdd55 4469 iovec = NULL;
e8c2bc1f 4470 rw = req->async_data;
c88598a9 4471 s = &rw->s;
cd658695
JA
4472 /*
4473 * Now use our persistent iterator and state, if we aren't already.
4474 * We've restored and mapped the iter to match.
4475 */
227c0c96 4476
b23df91b 4477 do {
cd658695
JA
4478 /*
4479 * We end up here because of a partial read, either from
4480 * above or inside this loop. Advance the iter by the bytes
4481 * that were consumed.
4482 */
c88598a9
PB
4483 iov_iter_advance(&s->iter, ret);
4484 if (!iov_iter_count(&s->iter))
cd658695 4485 break;
b23df91b 4486 rw->bytes_done += ret;
c88598a9 4487 iov_iter_save_state(&s->iter, &s->iter_state);
cd658695 4488
b23df91b
PB
4489 /* if we can retry, do so with the callbacks armed */
4490 if (!io_rw_should_retry(req)) {
4491 kiocb->ki_flags &= ~IOCB_WAITQ;
4492 return -EAGAIN;
4493 }
4494
4495 /*
4496 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
4497 * we get -EIOCBQUEUED, then we'll get a notification when the
4498 * desired page gets unlocked. We can also get a partial read
4499 * here, and if we do, then just retry at the new offset.
4500 */
c88598a9 4501 ret = io_iter_do_read(req, &s->iter);
b23df91b
PB
4502 if (ret == -EIOCBQUEUED)
4503 return 0;
227c0c96 4504 /* we got some bytes, but not all. retry. */
b5b0ecb7 4505 kiocb->ki_flags &= ~IOCB_WAITQ;
c88598a9 4506 iov_iter_restore(&s->iter, &s->iter_state);
cd658695 4507 } while (ret > 0);
227c0c96 4508done:
2ea537ca 4509 kiocb_done(req, ret, issue_flags);
fe1cdd55
PB
4510out_free:
4511 /* it's faster to check here than to delegate to kfree */
4512 if (iovec)
4513 kfree(iovec);
5ea5dd45 4514 return 0;
2b188cc1
JA
4515}
4516
889fca73 4517static int io_write(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1 4518{
607b6fb8 4519 struct io_rw_state __s, *s = &__s;
c88598a9 4520 struct iovec *iovec;
9adbd45d 4521 struct kiocb *kiocb = &req->rw.kiocb;
45d189c6 4522 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
cd658695 4523 ssize_t ret, ret2;
b4aec400 4524 loff_t *ppos;
2b188cc1 4525
607b6fb8 4526 if (!req_has_async_data(req)) {
5e49c973
PB
4527 ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
4528 if (unlikely(ret < 0))
2846c481 4529 return ret;
607b6fb8
PB
4530 } else {
4531 struct io_async_rw *rw = req->async_data;
4532
4533 s = &rw->s;
4534 iov_iter_restore(&s->iter, &s->iter_state);
2846c481 4535 iovec = NULL;
2846c481 4536 }
584b0180 4537 ret = io_rw_init_file(req, FMODE_WRITE);
323b190b
JA
4538 if (unlikely(ret)) {
4539 kfree(iovec);
584b0180 4540 return ret;
323b190b 4541 }
cef216fc 4542 req->cqe.res = iov_iter_count(&s->iter);
2b188cc1 4543
607b6fb8
PB
4544 if (force_nonblock) {
4545 /* If the file doesn't support async, just async punt */
35645ac3 4546 if (unlikely(!io_file_supports_nowait(req)))
607b6fb8 4547 goto copy_iov;
fd6c2e4c 4548
607b6fb8
PB
4549 /* file path doesn't support NOWAIT for non-direct IO */
4550 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
4551 (req->flags & REQ_F_ISREG))
4552 goto copy_iov;
31b51510 4553
607b6fb8
PB
4554 kiocb->ki_flags |= IOCB_NOWAIT;
4555 } else {
4556 /* Ensure we clear previously set non-block flag */
4557 kiocb->ki_flags &= ~IOCB_NOWAIT;
4558 }
31b51510 4559
b4aec400 4560 ppos = io_kiocb_update_pos(req);
d34e1e5b 4561
cef216fc 4562 ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
fa15bafb
PB
4563 if (unlikely(ret))
4564 goto out_free;
4ed734b0 4565
fa15bafb
PB
4566 /*
4567 * Open-code file_start_write here to grab freeze protection,
4568 * which will be released by another thread in
4569 * io_complete_rw(). Fool lockdep by telling it the lock got
4570 * released so that it doesn't complain about the held lock when
4571 * we return to userspace.
4572 */
4573 if (req->flags & REQ_F_ISREG) {
8a3c84b6 4574 sb_start_write(file_inode(req->file)->i_sb);
fa15bafb
PB
4575 __sb_writers_release(file_inode(req->file)->i_sb,
4576 SB_FREEZE_WRITE);
4577 }
4578 kiocb->ki_flags |= IOCB_WRITE;
4ed734b0 4579
35645ac3 4580 if (likely(req->file->f_op->write_iter))
c88598a9 4581 ret2 = call_write_iter(req->file, kiocb, &s->iter);
2dd2111d 4582 else if (req->file->f_op->write)
c88598a9 4583 ret2 = loop_rw_iter(WRITE, req, &s->iter);
2dd2111d
GH
4584 else
4585 ret2 = -EINVAL;
4ed734b0 4586
6ad7f233
PB
4587 if (req->flags & REQ_F_REISSUE) {
4588 req->flags &= ~REQ_F_REISSUE;
230d50d4 4589 ret2 = -EAGAIN;
6ad7f233 4590 }
230d50d4 4591
fa15bafb
PB
4592 /*
4593 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
4594 * retry them without IOCB_NOWAIT.
4595 */
4596 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
4597 ret2 = -EAGAIN;
75c668cd
PB
4598 /* no retry on NONBLOCK nor RWF_NOWAIT */
4599 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
355afaeb 4600 goto done;
fa15bafb 4601 if (!force_nonblock || ret2 != -EAGAIN) {
eefdf30f 4602 /* IOPOLL retry should happen for io-wq threads */
b10841c9 4603 if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
eefdf30f 4604 goto copy_iov;
355afaeb 4605done:
2ea537ca 4606 kiocb_done(req, ret2, issue_flags);
fa15bafb 4607 } else {
f67676d1 4608copy_iov:
c88598a9
PB
4609 iov_iter_restore(&s->iter, &s->iter_state);
4610 ret = io_setup_async_rw(req, iovec, s, false);
6bf985dc 4611 return ret ?: -EAGAIN;
2b188cc1 4612 }
31b51510 4613out_free:
f261c168 4614 /* it's reportedly faster than delegating the null check to kfree() */
252917c3 4615 if (iovec)
6f2cc166 4616 kfree(iovec);
2b188cc1
JA
4617 return ret;
4618}
4619
80a261fd
JA
4620static int io_renameat_prep(struct io_kiocb *req,
4621 const struct io_uring_sqe *sqe)
4622{
4623 struct io_rename *ren = &req->rename;
4624 const char __user *oldf, *newf;
4625
73911426 4626 if (sqe->buf_index || sqe->splice_fd_in)
ed7eb259 4627 return -EINVAL;
80a261fd
JA
4628 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4629 return -EBADF;
4630
4631 ren->old_dfd = READ_ONCE(sqe->fd);
4632 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
4633 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4634 ren->new_dfd = READ_ONCE(sqe->len);
4635 ren->flags = READ_ONCE(sqe->rename_flags);
4636
4637 ren->oldpath = getname(oldf);
4638 if (IS_ERR(ren->oldpath))
4639 return PTR_ERR(ren->oldpath);
4640
4641 ren->newpath = getname(newf);
4642 if (IS_ERR(ren->newpath)) {
4643 putname(ren->oldpath);
4644 return PTR_ERR(ren->newpath);
4645 }
4646
4647 req->flags |= REQ_F_NEED_CLEANUP;
4648 return 0;
4649}
4650
45d189c6 4651static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
80a261fd
JA
4652{
4653 struct io_rename *ren = &req->rename;
4654 int ret;
4655
45d189c6 4656 if (issue_flags & IO_URING_F_NONBLOCK)
80a261fd
JA
4657 return -EAGAIN;
4658
4659 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
4660 ren->newpath, ren->flags);
4661
4662 req->flags &= ~REQ_F_NEED_CLEANUP;
80a261fd
JA
4663 io_req_complete(req, ret);
4664 return 0;
4665}
4666
e9621e2b
SR
4667static inline void __io_xattr_finish(struct io_kiocb *req)
4668{
4669 struct io_xattr *ix = &req->xattr;
4670
4671 if (ix->filename)
4672 putname(ix->filename);
4673
4674 kfree(ix->ctx.kname);
4675 kvfree(ix->ctx.kvalue);
4676}
4677
4678static void io_xattr_finish(struct io_kiocb *req, int ret)
4679{
4680 req->flags &= ~REQ_F_NEED_CLEANUP;
4681
4682 __io_xattr_finish(req);
e9621e2b
SR
4683 io_req_complete(req, ret);
4684}
4685
a56834e0
SR
4686static int __io_getxattr_prep(struct io_kiocb *req,
4687 const struct io_uring_sqe *sqe)
4688{
4689 struct io_xattr *ix = &req->xattr;
4690 const char __user *name;
4691 int ret;
4692
a56834e0
SR
4693 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4694 return -EBADF;
4695
4696 ix->filename = NULL;
4697 ix->ctx.kvalue = NULL;
4698 name = u64_to_user_ptr(READ_ONCE(sqe->addr));
4699 ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4700 ix->ctx.size = READ_ONCE(sqe->len);
4701 ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
4702
4703 if (ix->ctx.flags)
4704 return -EINVAL;
4705
4706 ix->ctx.kname = kmalloc(sizeof(*ix->ctx.kname), GFP_KERNEL);
4707 if (!ix->ctx.kname)
4708 return -ENOMEM;
4709
4710 ret = strncpy_from_user(ix->ctx.kname->name, name,
4711 sizeof(ix->ctx.kname->name));
4712 if (!ret || ret == sizeof(ix->ctx.kname->name))
4713 ret = -ERANGE;
4714 if (ret < 0) {
4715 kfree(ix->ctx.kname);
4716 return ret;
4717 }
4718
4719 req->flags |= REQ_F_NEED_CLEANUP;
4720 return 0;
4721}
4722
4723static int io_fgetxattr_prep(struct io_kiocb *req,
4724 const struct io_uring_sqe *sqe)
4725{
4726 return __io_getxattr_prep(req, sqe);
4727}
4728
4729static int io_getxattr_prep(struct io_kiocb *req,
4730 const struct io_uring_sqe *sqe)
4731{
4732 struct io_xattr *ix = &req->xattr;
4733 const char __user *path;
4734 int ret;
4735
4736 ret = __io_getxattr_prep(req, sqe);
4737 if (ret)
4738 return ret;
4739
4740 path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
4741
4742 ix->filename = getname_flags(path, LOOKUP_FOLLOW, NULL);
4743 if (IS_ERR(ix->filename)) {
4744 ret = PTR_ERR(ix->filename);
4745 ix->filename = NULL;
4746 }
4747
4748 return ret;
4749}
4750
4751static int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)
4752{
4753 struct io_xattr *ix = &req->xattr;
4754 int ret;
4755
4756 if (issue_flags & IO_URING_F_NONBLOCK)
4757 return -EAGAIN;
4758
4759 ret = do_getxattr(mnt_user_ns(req->file->f_path.mnt),
4760 req->file->f_path.dentry,
4761 &ix->ctx);
4762
4763 io_xattr_finish(req, ret);
4764 return 0;
4765}
4766
4767static int io_getxattr(struct io_kiocb *req, unsigned int issue_flags)
4768{
4769 struct io_xattr *ix = &req->xattr;
4770 unsigned int lookup_flags = LOOKUP_FOLLOW;
4771 struct path path;
4772 int ret;
4773
4774 if (issue_flags & IO_URING_F_NONBLOCK)
4775 return -EAGAIN;
4776
4777retry:
4778 ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL);
4779 if (!ret) {
4780 ret = do_getxattr(mnt_user_ns(path.mnt),
4781 path.dentry,
4782 &ix->ctx);
4783
4784 path_put(&path);
4785 if (retry_estale(ret, lookup_flags)) {
4786 lookup_flags |= LOOKUP_REVAL;
4787 goto retry;
4788 }
4789 }
4790
4791 io_xattr_finish(req, ret);
4792 return 0;
4793}
4794
e9621e2b
SR
4795static int __io_setxattr_prep(struct io_kiocb *req,
4796 const struct io_uring_sqe *sqe)
4797{
4798 struct io_xattr *ix = &req->xattr;
4799 const char __user *name;
4800 int ret;
4801
e9621e2b
SR
4802 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4803 return -EBADF;
4804
4805 ix->filename = NULL;
4806 name = u64_to_user_ptr(READ_ONCE(sqe->addr));
4807 ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4808 ix->ctx.kvalue = NULL;
4809 ix->ctx.size = READ_ONCE(sqe->len);
4810 ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
4811
4812 ix->ctx.kname = kmalloc(sizeof(*ix->ctx.kname), GFP_KERNEL);
4813 if (!ix->ctx.kname)
4814 return -ENOMEM;
4815
4816 ret = setxattr_copy(name, &ix->ctx);
4817 if (ret) {
4818 kfree(ix->ctx.kname);
4819 return ret;
4820 }
4821
4822 req->flags |= REQ_F_NEED_CLEANUP;
4823 return 0;
4824}
4825
4826static int io_setxattr_prep(struct io_kiocb *req,
4827 const struct io_uring_sqe *sqe)
4828{
4829 struct io_xattr *ix = &req->xattr;
4830 const char __user *path;
4831 int ret;
4832
4833 ret = __io_setxattr_prep(req, sqe);
4834 if (ret)
4835 return ret;
4836
4837 path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
4838
4839 ix->filename = getname_flags(path, LOOKUP_FOLLOW, NULL);
4840 if (IS_ERR(ix->filename)) {
4841 ret = PTR_ERR(ix->filename);
4842 ix->filename = NULL;
4843 }
4844
4845 return ret;
4846}
4847
4848static int io_fsetxattr_prep(struct io_kiocb *req,
4849 const struct io_uring_sqe *sqe)
4850{
4851 return __io_setxattr_prep(req, sqe);
4852}
4853
4854static int __io_setxattr(struct io_kiocb *req, unsigned int issue_flags,
4855 struct path *path)
4856{
4857 struct io_xattr *ix = &req->xattr;
4858 int ret;
4859
4860 ret = mnt_want_write(path->mnt);
4861 if (!ret) {
4862 ret = do_setxattr(mnt_user_ns(path->mnt), path->dentry, &ix->ctx);
4863 mnt_drop_write(path->mnt);
4864 }
4865
4866 return ret;
4867}
4868
4869static int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags)
4870{
4871 int ret;
4872
4873 if (issue_flags & IO_URING_F_NONBLOCK)
4874 return -EAGAIN;
4875
4876 ret = __io_setxattr(req, issue_flags, &req->file->f_path);
4877 io_xattr_finish(req, ret);
4878
4879 return 0;
4880}
4881
4882static int io_setxattr(struct io_kiocb *req, unsigned int issue_flags)
4883{
4884 struct io_xattr *ix = &req->xattr;
4885 unsigned int lookup_flags = LOOKUP_FOLLOW;
4886 struct path path;
4887 int ret;
4888
4889 if (issue_flags & IO_URING_F_NONBLOCK)
4890 return -EAGAIN;
4891
4892retry:
4893 ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL);
4894 if (!ret) {
4895 ret = __io_setxattr(req, issue_flags, &path);
4896 path_put(&path);
4897 if (retry_estale(ret, lookup_flags)) {
4898 lookup_flags |= LOOKUP_REVAL;
4899 goto retry;
4900 }
4901 }
4902
4903 io_xattr_finish(req, ret);
4904 return 0;
4905}
4906
14a1143b
JA
4907static int io_unlinkat_prep(struct io_kiocb *req,
4908 const struct io_uring_sqe *sqe)
4909{
4910 struct io_unlink *un = &req->unlink;
4911 const char __user *fname;
4912
73911426 4913 if (sqe->off || sqe->len || sqe->buf_index || sqe->splice_fd_in)
22634bc5 4914 return -EINVAL;
14a1143b
JA
4915 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4916 return -EBADF;
4917
4918 un->dfd = READ_ONCE(sqe->fd);
4919
4920 un->flags = READ_ONCE(sqe->unlink_flags);
4921 if (un->flags & ~AT_REMOVEDIR)
4922 return -EINVAL;
4923
4924 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
4925 un->filename = getname(fname);
4926 if (IS_ERR(un->filename))
4927 return PTR_ERR(un->filename);
4928
4929 req->flags |= REQ_F_NEED_CLEANUP;
4930 return 0;
4931}
4932
45d189c6 4933static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
14a1143b
JA
4934{
4935 struct io_unlink *un = &req->unlink;
4936 int ret;
4937
45d189c6 4938 if (issue_flags & IO_URING_F_NONBLOCK)
14a1143b
JA
4939 return -EAGAIN;
4940
4941 if (un->flags & AT_REMOVEDIR)
4942 ret = do_rmdir(un->dfd, un->filename);
4943 else
4944 ret = do_unlinkat(un->dfd, un->filename);
4945
4946 req->flags &= ~REQ_F_NEED_CLEANUP;
14a1143b
JA
4947 io_req_complete(req, ret);
4948 return 0;
4949}
4950
e34a02dc
DK
4951static int io_mkdirat_prep(struct io_kiocb *req,
4952 const struct io_uring_sqe *sqe)
4953{
4954 struct io_mkdir *mkd = &req->mkdir;
4955 const char __user *fname;
4956
73911426 4957 if (sqe->off || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
e34a02dc
DK
4958 return -EINVAL;
4959 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4960 return -EBADF;
4961
4962 mkd->dfd = READ_ONCE(sqe->fd);
4963 mkd->mode = READ_ONCE(sqe->len);
4964
4965 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
4966 mkd->filename = getname(fname);
4967 if (IS_ERR(mkd->filename))
4968 return PTR_ERR(mkd->filename);
4969
4970 req->flags |= REQ_F_NEED_CLEANUP;
4971 return 0;
4972}
4973
04f34081 4974static int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
e34a02dc
DK
4975{
4976 struct io_mkdir *mkd = &req->mkdir;
4977 int ret;
4978
4979 if (issue_flags & IO_URING_F_NONBLOCK)
4980 return -EAGAIN;
4981
4982 ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
4983
4984 req->flags &= ~REQ_F_NEED_CLEANUP;
e34a02dc
DK
4985 io_req_complete(req, ret);
4986 return 0;
4987}
4988
7a8721f8
DK
4989static int io_symlinkat_prep(struct io_kiocb *req,
4990 const struct io_uring_sqe *sqe)
4991{
4992 struct io_symlink *sl = &req->symlink;
4993 const char __user *oldpath, *newpath;
4994
73911426 4995 if (sqe->len || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
7a8721f8
DK
4996 return -EINVAL;
4997 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4998 return -EBADF;
4999
5000 sl->new_dfd = READ_ONCE(sqe->fd);
5001 oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
5002 newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
5003
5004 sl->oldpath = getname(oldpath);
5005 if (IS_ERR(sl->oldpath))
5006 return PTR_ERR(sl->oldpath);
5007
5008 sl->newpath = getname(newpath);
5009 if (IS_ERR(sl->newpath)) {
5010 putname(sl->oldpath);
5011 return PTR_ERR(sl->newpath);
5012 }
5013
5014 req->flags |= REQ_F_NEED_CLEANUP;
5015 return 0;
5016}
5017
04f34081 5018static int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
7a8721f8
DK
5019{
5020 struct io_symlink *sl = &req->symlink;
5021 int ret;
5022
5023 if (issue_flags & IO_URING_F_NONBLOCK)
5024 return -EAGAIN;
5025
5026 ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
5027
5028 req->flags &= ~REQ_F_NEED_CLEANUP;
7a8721f8
DK
5029 io_req_complete(req, ret);
5030 return 0;
5031}
5032
cf30da90
DK
5033static int io_linkat_prep(struct io_kiocb *req,
5034 const struct io_uring_sqe *sqe)
5035{
5036 struct io_hardlink *lnk = &req->hardlink;
5037 const char __user *oldf, *newf;
5038
73911426 5039 if (sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
cf30da90
DK
5040 return -EINVAL;
5041 if (unlikely(req->flags & REQ_F_FIXED_FILE))
5042 return -EBADF;
5043
5044 lnk->old_dfd = READ_ONCE(sqe->fd);
5045 lnk->new_dfd = READ_ONCE(sqe->len);
5046 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
5047 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
5048 lnk->flags = READ_ONCE(sqe->hardlink_flags);
5049
5050 lnk->oldpath = getname(oldf);
5051 if (IS_ERR(lnk->oldpath))
5052 return PTR_ERR(lnk->oldpath);
5053
5054 lnk->newpath = getname(newf);
5055 if (IS_ERR(lnk->newpath)) {
5056 putname(lnk->oldpath);
5057 return PTR_ERR(lnk->newpath);
5058 }
5059
5060 req->flags |= REQ_F_NEED_CLEANUP;
5061 return 0;
5062}
5063
04f34081 5064static int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
cf30da90
DK
5065{
5066 struct io_hardlink *lnk = &req->hardlink;
5067 int ret;
5068
5069 if (issue_flags & IO_URING_F_NONBLOCK)
5070 return -EAGAIN;
5071
5072 ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
5073 lnk->newpath, lnk->flags);
5074
5075 req->flags &= ~REQ_F_NEED_CLEANUP;
cf30da90
DK
5076 io_req_complete(req, ret);
5077 return 0;
5078}
5079
ee692a21
JA
5080static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
5081{
5082 req->uring_cmd.task_work_cb(&req->uring_cmd);
5083}
5084
5085void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
5086 void (*task_work_cb)(struct io_uring_cmd *))
5087{
5088 struct io_kiocb *req = container_of(ioucmd, struct io_kiocb, uring_cmd);
5089
5090 req->uring_cmd.task_work_cb = task_work_cb;
5091 req->io_task_work.func = io_uring_cmd_work;
3fe07bcd 5092 io_req_task_prio_work_add(req);
ee692a21
JA
5093}
5094EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);
5095
5096/*
5097 * Called by consumers of io_uring_cmd, if they originally returned
5098 * -EIOCBQUEUED upon receiving the command.
5099 */
5100void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
5101{
5102 struct io_kiocb *req = container_of(ioucmd, struct io_kiocb, uring_cmd);
5103
5104 if (ret < 0)
5105 req_set_fail(req);
5106 if (req->ctx->flags & IORING_SETUP_CQE32)
5107 __io_req_complete32(req, 0, ret, 0, res2, 0);
5108 else
5109 io_req_complete(req, ret);
5110}
5111EXPORT_SYMBOL_GPL(io_uring_cmd_done);
5112
5113static int io_uring_cmd_prep_async(struct io_kiocb *req)
5114{
5115 size_t cmd_size;
5116
5117 cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128);
5118
5119 memcpy(req->async_data, req->uring_cmd.cmd, cmd_size);
5120 return 0;
5121}
5122
5123static int io_uring_cmd_prep(struct io_kiocb *req,
5124 const struct io_uring_sqe *sqe)
5125{
5126 struct io_uring_cmd *ioucmd = &req->uring_cmd;
5127
5128 if (sqe->rw_flags)
5129 return -EINVAL;
5130 ioucmd->cmd = sqe->cmd;
5131 ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
5132 return 0;
5133}
5134
5135static int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
5136{
5137 struct io_uring_cmd *ioucmd = &req->uring_cmd;
5138 struct io_ring_ctx *ctx = req->ctx;
5139 struct file *file = req->file;
5140 int ret;
5141
5142 if (!req->file->f_op->uring_cmd)
5143 return -EOPNOTSUPP;
5144
5145 if (ctx->flags & IORING_SETUP_SQE128)
5146 issue_flags |= IO_URING_F_SQE128;
5147 if (ctx->flags & IORING_SETUP_CQE32)
5148 issue_flags |= IO_URING_F_CQE32;
5149 if (ctx->flags & IORING_SETUP_IOPOLL)
5150 issue_flags |= IO_URING_F_IOPOLL;
5151
5152 if (req_has_async_data(req))
5153 ioucmd->cmd = req->async_data;
5154
5155 ret = file->f_op->uring_cmd(ioucmd, issue_flags);
5156 if (ret == -EAGAIN) {
5157 if (!req_has_async_data(req)) {
5158 if (io_alloc_async_data(req))
5159 return -ENOMEM;
5160 io_uring_cmd_prep_async(req);
5161 }
5162 return -EAGAIN;
5163 }
5164
5165 if (ret != -EIOCBQUEUED)
5166 io_uring_cmd_done(ioucmd, ret, 0);
5167 return 0;
5168}
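/*
 * Illustrative sketch (not part of this file; the foo_* names and the exact
 * handler behaviour are assumptions): a driver that wants to service
 * IORING_OP_URING_CMD implements ->uring_cmd() in its file_operations and
 * completes either inline or asynchronously via the helpers exported above:
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *ioucmd,
 *				 unsigned int issue_flags)
 *	{
 *		if (issue_flags & IO_URING_F_NONBLOCK)
 *			return -EAGAIN;	// punt to io-wq, or queue and
 *					// return -EIOCBQUEUED instead
 *
 *		// ... act on ioucmd->cmd_op / ioucmd->cmd ...
 *		return 0;		// posted as the CQE result
 *	}
 *
 *	static const struct file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.uring_cmd	= foo_uring_cmd,
 *	};
 *
 * A handler that returned -EIOCBQUEUED later calls io_uring_cmd_done(), or
 * io_uring_cmd_complete_in_task() if completion must run in task context.
 */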
5169
f2a8d5c7
PB
5170static int __io_splice_prep(struct io_kiocb *req,
5171 const struct io_uring_sqe *sqe)
7d67af2c 5172{
fe7e3257 5173 struct io_splice *sp = &req->splice;
7d67af2c 5174 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
7d67af2c 5175
7d67af2c
PB
5176 sp->len = READ_ONCE(sqe->len);
5177 sp->flags = READ_ONCE(sqe->splice_flags);
7d67af2c
PB
5178 if (unlikely(sp->flags & ~valid_flags))
5179 return -EINVAL;
a3e4bc23 5180 sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
7d67af2c
PB
5181 return 0;
5182}
5183
f2a8d5c7
PB
5184static int io_tee_prep(struct io_kiocb *req,
5185 const struct io_uring_sqe *sqe)
5186{
5187 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
5188 return -EINVAL;
5189 return __io_splice_prep(req, sqe);
5190}
5191
45d189c6 5192static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
f2a8d5c7
PB
5193{
5194 struct io_splice *sp = &req->splice;
f2a8d5c7
PB
5195 struct file *out = sp->file_out;
5196 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
a3e4bc23 5197 struct file *in;
f2a8d5c7
PB
5198 long ret = 0;
5199
45d189c6 5200 if (issue_flags & IO_URING_F_NONBLOCK)
f2a8d5c7 5201 return -EAGAIN;
a3e4bc23 5202
5106dd6e 5203 if (sp->flags & SPLICE_F_FD_IN_FIXED)
e9419766 5204 in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
5106dd6e
JA
5205 else
5206 in = io_file_get_normal(req, sp->splice_fd_in);
a3e4bc23
JA
5207 if (!in) {
5208 ret = -EBADF;
5209 goto done;
5210 }
5211
f2a8d5c7
PB
5212 if (sp->len)
5213 ret = do_tee(in, out, sp->len, flags);
5214
e1d767f0
PB
5215 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
5216 io_put_file(in);
a3e4bc23 5217done:
f2a8d5c7 5218 if (ret != sp->len)
93d2bcd2 5219 req_set_fail(req);
4ffaa94b 5220 __io_req_complete(req, 0, ret, 0);
f2a8d5c7
PB
5221 return 0;
5222}
5223
5224static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5225{
fe7e3257 5226 struct io_splice *sp = &req->splice;
f2a8d5c7
PB
5227
5228 sp->off_in = READ_ONCE(sqe->splice_off_in);
5229 sp->off_out = READ_ONCE(sqe->off);
5230 return __io_splice_prep(req, sqe);
5231}
5232
45d189c6 5233static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
7d67af2c
PB
5234{
5235 struct io_splice *sp = &req->splice;
7d67af2c
PB
5236 struct file *out = sp->file_out;
5237 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
5238 loff_t *poff_in, *poff_out;
a3e4bc23 5239 struct file *in;
c9687426 5240 long ret = 0;
7d67af2c 5241
45d189c6 5242 if (issue_flags & IO_URING_F_NONBLOCK)
2fb3e822 5243 return -EAGAIN;
7d67af2c 5244
5106dd6e 5245 if (sp->flags & SPLICE_F_FD_IN_FIXED)
e9419766 5246 in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
5106dd6e
JA
5247 else
5248 in = io_file_get_normal(req, sp->splice_fd_in);
a3e4bc23
JA
5249 if (!in) {
5250 ret = -EBADF;
5251 goto done;
5252 }
5253
7d67af2c
PB
5254 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
5255 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
c9687426 5256
948a7749 5257 if (sp->len)
c9687426 5258 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
7d67af2c 5259
e1d767f0
PB
5260 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
5261 io_put_file(in);
a3e4bc23 5262done:
7d67af2c 5263 if (ret != sp->len)
93d2bcd2 5264 req_set_fail(req);
4ffaa94b 5265 __io_req_complete(req, 0, ret, 0);
7d67af2c
PB
5266 return 0;
5267}
5268
2bb04df7
SR
5269static int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5270{
5271 /*
 5272	 * If the ring is set up with CQE32, relay back addr/addr2
5273 */
5274 if (req->ctx->flags & IORING_SETUP_CQE32) {
5275 req->nop.extra1 = READ_ONCE(sqe->addr);
5276 req->nop.extra2 = READ_ONCE(sqe->addr2);
5277 }
5278
5279 return 0;
5280}
5281
2b188cc1
JA
5282/*
5283 * IORING_OP_NOP just posts a completion event, nothing else.
5284 */
889fca73 5285static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1 5286{
9836e93c 5287 unsigned int cflags;
3d200242 5288 void __user *buf;
2b188cc1 5289
3d200242
JA
5290 if (req->flags & REQ_F_BUFFER_SELECT) {
5291 size_t len = 1;
5292
5293 buf = io_buffer_select(req, &len, issue_flags);
984824db
CH
5294 if (!buf)
5295 return -ENOBUFS;
3d200242 5296 }
def596e9 5297
9836e93c 5298 cflags = io_put_kbuf(req, issue_flags);
2bb04df7 5299 if (!(req->ctx->flags & IORING_SETUP_CQE32))
9836e93c 5300 __io_req_complete(req, issue_flags, 0, cflags);
2bb04df7 5301 else
9836e93c
LT
5302 __io_req_complete32(req, issue_flags, 0, cflags,
5303 req->nop.extra1, req->nop.extra2);
2b188cc1
JA
5304 return 0;
5305}
5306
4f57f06c
JA
5307static int io_msg_ring_prep(struct io_kiocb *req,
5308 const struct io_uring_sqe *sqe)
5309{
73911426
JA
5310 if (unlikely(sqe->addr || sqe->rw_flags || sqe->splice_fd_in ||
5311 sqe->buf_index || sqe->personality))
4f57f06c
JA
5312 return -EINVAL;
5313
4f57f06c
JA
5314 req->msg.user_data = READ_ONCE(sqe->off);
5315 req->msg.len = READ_ONCE(sqe->len);
5316 return 0;
5317}
5318
5319static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
5320{
5321 struct io_ring_ctx *target_ctx;
5322 struct io_msg *msg = &req->msg;
4f57f06c 5323 bool filled;
3f1d52ab 5324 int ret;
4f57f06c 5325
3f1d52ab
JA
5326 ret = -EBADFD;
5327 if (req->file->f_op != &io_uring_fops)
5328 goto done;
4f57f06c 5329
3f1d52ab 5330 ret = -EOVERFLOW;
4f57f06c
JA
5331 target_ctx = req->file->private_data;
5332
5333 spin_lock(&target_ctx->completion_lock);
7ef66d18 5334 filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, 0);
4f57f06c
JA
5335 io_commit_cqring(target_ctx);
5336 spin_unlock(&target_ctx->completion_lock);
5337
5338 if (filled) {
5339 io_cqring_ev_posted(target_ctx);
5340 ret = 0;
5341 }
5342
3f1d52ab 5343done:
9666d420
JA
5344 if (ret < 0)
5345 req_set_fail(req);
4f57f06c 5346 __io_req_complete(req, issue_flags, ret, 0);
aa184e86
JA
5347 /* put file to avoid an attempt to IOPOLL the req */
5348 io_put_file(req->file);
5349 req->file = NULL;
4f57f06c
JA
5350 return 0;
5351}
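
/*
 * Editorial sketch (not part of this source): an application-side SQE for
 * IORING_OP_MSG_RING, matching the fields io_msg_ring_prep() reads above
 * (fd = target ring, off = user_data to post, len = value for cqe->res).
 * Ring setup and submission (e.g. via liburing) are omitted.
 */
#include <linux/io_uring.h>
#include <string.h>

static void prep_msg_ring_sqe(struct io_uring_sqe *sqe, int target_ring_fd,
			      __u64 user_data, __u32 res)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_MSG_RING;
	sqe->fd = target_ring_fd;	/* must be an io_uring fd, see io_msg_ring() */
	sqe->off = user_data;		/* posted as cqe->user_data in the target ring */
	sqe->len = res;			/* posted as cqe->res in the target ring */
}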
5352
1155c76a 5353static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
c992fe29 5354{
73911426 5355 if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
c992fe29
CH
5356 return -EINVAL;
5357
8ed8d3c3
JA
5358 req->sync.flags = READ_ONCE(sqe->fsync_flags);
5359 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
5360 return -EINVAL;
5361
5362 req->sync.off = READ_ONCE(sqe->off);
5363 req->sync.len = READ_ONCE(sqe->len);
c992fe29
CH
5364 return 0;
5365}
5366
45d189c6 5367static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 5368{
8ed8d3c3 5369 loff_t end = req->sync.off + req->sync.len;
8ed8d3c3
JA
5370 int ret;
5371
ac45abc0 5372 /* fsync always requires a blocking context */
45d189c6 5373 if (issue_flags & IO_URING_F_NONBLOCK)
ac45abc0
PB
5374 return -EAGAIN;
5375
9adbd45d 5376 ret = vfs_fsync_range(req->file, req->sync.off,
8ed8d3c3
JA
5377 end > 0 ? end : LLONG_MAX,
5378 req->sync.flags & IORING_FSYNC_DATASYNC);
e1e16097 5379 io_req_complete(req, ret);
c992fe29
CH
5380 return 0;
5381}
5382
d63d1b5e
JA
5383static int io_fallocate_prep(struct io_kiocb *req,
5384 const struct io_uring_sqe *sqe)
5385{
73911426 5386 if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
3232dd02 5387 return -EINVAL;
d63d1b5e
JA
5388
5389 req->sync.off = READ_ONCE(sqe->off);
5390 req->sync.len = READ_ONCE(sqe->addr);
5391 req->sync.mode = READ_ONCE(sqe->len);
5392 return 0;
5393}
5394
45d189c6 5395static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
5d17b4a4 5396{
ac45abc0
PB
5397 int ret;
5398
d63d1b5e 5399	/* fallocate always requires a blocking context */
45d189c6 5400 if (issue_flags & IO_URING_F_NONBLOCK)
5d17b4a4 5401 return -EAGAIN;
ac45abc0
PB
5402 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
5403 req->sync.len);
4ffaa94b 5404 if (ret >= 0)
f63cf519 5405 fsnotify_modify(req->file);
e1e16097 5406 io_req_complete(req, ret);
5d17b4a4
JA
5407 return 0;
5408}
5409
ec65fea5 5410static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
b7bb4f7d 5411{
f8748881 5412 const char __user *fname;
15b71abe 5413 int ret;
b7bb4f7d 5414
73911426 5415 if (unlikely(sqe->buf_index))
15b71abe 5416 return -EINVAL;
ec65fea5 5417 if (unlikely(req->flags & REQ_F_FIXED_FILE))
cf3040ca 5418 return -EBADF;
03b1230c 5419
ec65fea5
PB
5420 /* open.how should be already initialised */
5421 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
08a1d26e 5422 req->open.how.flags |= O_LARGEFILE;
3529d8c2 5423
25e72d10
PB
5424 req->open.dfd = READ_ONCE(sqe->fd);
5425 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
f8748881 5426 req->open.filename = getname(fname);
15b71abe
JA
5427 if (IS_ERR(req->open.filename)) {
5428 ret = PTR_ERR(req->open.filename);
5429 req->open.filename = NULL;
5430 return ret;
5431 }
b9445598
PB
5432
5433 req->open.file_slot = READ_ONCE(sqe->file_index);
5434 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC))
5435 return -EINVAL;
5436
4022e7af 5437 req->open.nofile = rlimit(RLIMIT_NOFILE);
8fef80bf 5438 req->flags |= REQ_F_NEED_CLEANUP;
15b71abe 5439 return 0;
03b1230c
JA
5440}
5441
ec65fea5
PB
5442static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5443{
d3fddf6d
PB
5444 u64 mode = READ_ONCE(sqe->len);
5445 u64 flags = READ_ONCE(sqe->open_flags);
ec65fea5 5446
ec65fea5
PB
5447 req->open.how = build_open_how(flags, mode);
5448 return __io_openat_prep(req, sqe);
5449}
5450
cebdb986 5451static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
aa1fa28f 5452{
cebdb986 5453 struct open_how __user *how;
cebdb986 5454 size_t len;
0fa03c62
JA
5455 int ret;
5456
cebdb986
JA
5457 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
5458 len = READ_ONCE(sqe->len);
cebdb986
JA
5459 if (len < OPEN_HOW_SIZE_VER0)
5460 return -EINVAL;
3529d8c2 5461
cebdb986
JA
5462 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
5463 len);
5464 if (ret)
5465 return ret;
3529d8c2 5466
ec65fea5 5467 return __io_openat_prep(req, sqe);
cebdb986
JA
5468}
5469
1339f24b 5470static int io_file_bitmap_get(struct io_ring_ctx *ctx)
b70b8e33
JA
5471{
5472 struct io_file_table *table = &ctx->file_table;
5473 unsigned long nr = ctx->nr_user_files;
5474 int ret;
5475
b70b8e33
JA
5476 do {
5477 ret = find_next_zero_bit(table->bitmap, nr, table->alloc_hint);
4278a0de 5478 if (ret != nr)
b70b8e33 5479 return ret;
4278a0de 5480
b70b8e33
JA
5481 if (!table->alloc_hint)
5482 break;
5483
5484 nr = table->alloc_hint;
5485 table->alloc_hint = 0;
5486 } while (1);
5487
5488 return -ENFILE;
5489}
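
/*
 * Editorial sketch (not part of this source): the hint-based wraparound
 * search done by io_file_bitmap_get() above, rewritten as a standalone
 * helper so the scan order is easy to follow: first scan from the
 * allocation hint to the end, then wrap and scan the range before the hint.
 */
#define SLOT_BITS	(8 * sizeof(unsigned long))

static long find_free_slot(const unsigned long *bitmap, unsigned long nr_slots,
			   unsigned long hint)
{
	unsigned long i;

	for (i = hint; i < nr_slots; i++)
		if (!(bitmap[i / SLOT_BITS] & (1UL << (i % SLOT_BITS))))
			return (long)i;
	for (i = 0; i < hint; i++)
		if (!(bitmap[i / SLOT_BITS] & (1UL << (i % SLOT_BITS))))
			return (long)i;
	return -1;	/* the kernel code returns -ENFILE here */
}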
5490
8c71fe75
XW
5491/*
 5492 * Note that when io_fixed_fd_install() returns an error value, it
 5493 * ensures that fput() is called correspondingly.
5494 */
1339f24b
JA
5495static int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
5496 struct file *file, unsigned int file_slot)
5497{
5498 bool alloc_slot = file_slot == IORING_FILE_INDEX_ALLOC;
5499 struct io_ring_ctx *ctx = req->ctx;
5500 int ret;
5501
61c1b44a
PB
5502 io_ring_submit_lock(ctx, issue_flags);
5503
1339f24b 5504 if (alloc_slot) {
1339f24b 5505 ret = io_file_bitmap_get(ctx);
61c1b44a
PB
5506 if (unlikely(ret < 0))
5507 goto err;
1339f24b
JA
5508 file_slot = ret;
5509 } else {
5510 file_slot--;
5511 }
5512
5513 ret = io_install_fixed_file(req, file, issue_flags, file_slot);
61c1b44a
PB
5514 if (!ret && alloc_slot)
5515 ret = file_slot;
5516err:
5517 io_ring_submit_unlock(ctx, issue_flags);
5518 if (unlikely(ret < 0))
5519 fput(file);
1339f24b
JA
5520 return ret;
5521}
5522
45d189c6 5523static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
15b71abe
JA
5524{
5525 struct open_flags op;
15b71abe 5526 struct file *file;
b9445598
PB
5527 bool resolve_nonblock, nonblock_set;
5528 bool fixed = !!req->open.file_slot;
15b71abe
JA
5529 int ret;
5530
cebdb986 5531 ret = build_open_flags(&req->open.how, &op);
15b71abe
JA
5532 if (ret)
5533 goto err;
3a81fd02
JA
5534 nonblock_set = op.open_flag & O_NONBLOCK;
5535 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
45d189c6 5536 if (issue_flags & IO_URING_F_NONBLOCK) {
3a81fd02
JA
5537 /*
5538 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
 5539		 * it'll always return -EAGAIN
5540 */
5541 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
5542 return -EAGAIN;
5543 op.lookup_flags |= LOOKUP_CACHED;
5544 op.open_flag |= O_NONBLOCK;
5545 }
15b71abe 5546
b9445598
PB
5547 if (!fixed) {
5548 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
5549 if (ret < 0)
5550 goto err;
5551 }
15b71abe
JA
5552
5553 file = do_filp_open(req->open.dfd, req->open.filename, &op);
12dcb58a 5554 if (IS_ERR(file)) {
944d1444 5555 /*
12dcb58a
PB
5556 * We could hang on to this 'fd' on retrying, but seems like
5557 * marginal gain for something that is now known to be a slower
5558 * path. So just put it, and we'll get a new one when we retry.
944d1444 5559 */
b9445598
PB
5560 if (!fixed)
5561 put_unused_fd(ret);
3a81fd02 5562
15b71abe 5563 ret = PTR_ERR(file);
12dcb58a
PB
5564 /* only retry if RESOLVE_CACHED wasn't already set by application */
5565 if (ret == -EAGAIN &&
5566 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
5567 return -EAGAIN;
5568 goto err;
15b71abe 5569 }
12dcb58a
PB
5570
5571 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
5572 file->f_flags &= ~O_NONBLOCK;
5573 fsnotify_open(file);
b9445598
PB
5574
5575 if (!fixed)
5576 fd_install(ret, file);
5577 else
1339f24b
JA
5578 ret = io_fixed_fd_install(req, issue_flags, file,
5579 req->open.file_slot);
15b71abe
JA
5580err:
5581 putname(req->open.filename);
8fef80bf 5582 req->flags &= ~REQ_F_NEED_CLEANUP;
15b71abe 5583 if (ret < 0)
93d2bcd2 5584 req_set_fail(req);
0bdf3398 5585 __io_req_complete(req, issue_flags, ret, 0);
15b71abe
JA
5586 return 0;
5587}
5588
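/*
 * Editorial sketch (not part of this source): an application-side SQE for
 * IORING_OP_OPENAT2, matching the fields __io_openat_prep()/io_openat2_prep()
 * read above (fd = dfd, addr = pathname, addr2 = struct open_how pointer,
 * len = its size). Requesting RESOLVE_CACHED mirrors the LOOKUP_CACHED fast
 * path io_openat2() uses for IO_URING_F_NONBLOCK: if the lookup cannot be
 * served from the dcache, the completion carries -EAGAIN instead of blocking.
 * Ring setup and submission are omitted.
 */
#include <linux/io_uring.h>
#include <linux/openat2.h>
#include <fcntl.h>
#include <string.h>

static void prep_openat2_cached_sqe(struct io_uring_sqe *sqe, int dfd,
				    const char *path, struct open_how *how)
{
	memset(how, 0, sizeof(*how));
	how->flags = O_RDONLY;
	how->resolve = RESOLVE_CACHED;

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_OPENAT2;
	sqe->fd = dfd;
	sqe->addr = (unsigned long)path;
	sqe->addr2 = (unsigned long)how;
	sqe->len = sizeof(*how);
}
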
45d189c6 5589static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
cebdb986 5590{
e45cff58 5591 return io_openat2(req, issue_flags);
cebdb986
JA
5592}
5593
067524e9
JA
5594static int io_remove_buffers_prep(struct io_kiocb *req,
5595 const struct io_uring_sqe *sqe)
5596{
5597 struct io_provide_buf *p = &req->pbuf;
5598 u64 tmp;
5599
73911426 5600 if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
26578cda 5601 sqe->splice_fd_in)
067524e9
JA
5602 return -EINVAL;
5603
5604 tmp = READ_ONCE(sqe->fd);
5605 if (!tmp || tmp > USHRT_MAX)
5606 return -EINVAL;
5607
5608 memset(p, 0, sizeof(*p));
5609 p->nbufs = tmp;
5610 p->bgid = READ_ONCE(sqe->buf_group);
5611 return 0;
5612}
5613
dbc7d452
JA
5614static int __io_remove_buffers(struct io_ring_ctx *ctx,
5615 struct io_buffer_list *bl, unsigned nbufs)
067524e9
JA
5616{
5617 unsigned i = 0;
5618
5619 /* shouldn't happen */
5620 if (!nbufs)
5621 return 0;
5622
c7fb1942
JA
5623 if (bl->buf_nr_pages) {
5624 int j;
5625
5626 i = bl->buf_ring->tail - bl->head;
5627 for (j = 0; j < bl->buf_nr_pages; j++)
5628 unpin_user_page(bl->buf_pages[j]);
5629 kvfree(bl->buf_pages);
5630 bl->buf_pages = NULL;
5631 bl->buf_nr_pages = 0;
1d0dbbfa
JA
5632 /* make sure it's seen as empty */
5633 INIT_LIST_HEAD(&bl->buf_list);
c7fb1942
JA
5634 return i;
5635 }
5636
067524e9 5637 /* the head kbuf is the list itself */
dbc7d452 5638 while (!list_empty(&bl->buf_list)) {
067524e9
JA
5639 struct io_buffer *nxt;
5640
dbc7d452 5641 nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
067524e9 5642 list_del(&nxt->list);
067524e9
JA
5643 if (++i == nbufs)
5644 return i;
1d0254e6 5645 cond_resched();
067524e9
JA
5646 }
5647 i++;
067524e9
JA
5648
5649 return i;
5650}
5651
889fca73 5652static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
067524e9
JA
5653{
5654 struct io_provide_buf *p = &req->pbuf;
5655 struct io_ring_ctx *ctx = req->ctx;
dbc7d452 5656 struct io_buffer_list *bl;
067524e9 5657 int ret = 0;
067524e9 5658
f8929630 5659 io_ring_submit_lock(ctx, issue_flags);
067524e9
JA
5660
5661 ret = -ENOENT;
dbc7d452 5662 bl = io_buffer_get_list(ctx, p->bgid);
c7fb1942
JA
5663 if (bl) {
5664 ret = -EINVAL;
5665 /* can't use provide/remove buffers command on mapped buffers */
5666 if (!bl->buf_nr_pages)
5667 ret = __io_remove_buffers(ctx, bl, p->nbufs);
5668 }
067524e9 5669 if (ret < 0)
93d2bcd2 5670 req_set_fail(req);
067524e9 5671
9fb8cb49
PB
5672 /* complete before unlock, IOPOLL may need the lock */
5673 __io_req_complete(req, issue_flags, ret, 0);
f8929630 5674 io_ring_submit_unlock(ctx, issue_flags);
067524e9
JA
5675 return 0;
5676}
5677
ddf0322d
JA
5678static int io_provide_buffers_prep(struct io_kiocb *req,
5679 const struct io_uring_sqe *sqe)
5680{
38134ada 5681 unsigned long size, tmp_check;
ddf0322d
JA
5682 struct io_provide_buf *p = &req->pbuf;
5683 u64 tmp;
5684
73911426 5685 if (sqe->rw_flags || sqe->splice_fd_in)
ddf0322d
JA
5686 return -EINVAL;
5687
5688 tmp = READ_ONCE(sqe->fd);
5689 if (!tmp || tmp > USHRT_MAX)
5690 return -E2BIG;
5691 p->nbufs = tmp;
5692 p->addr = READ_ONCE(sqe->addr);
5693 p->len = READ_ONCE(sqe->len);
5694
38134ada
PB
5695 if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
5696 &size))
5697 return -EOVERFLOW;
5698 if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
5699 return -EOVERFLOW;
5700
d81269fe
PB
5701 size = (unsigned long)p->len * p->nbufs;
5702 if (!access_ok(u64_to_user_ptr(p->addr), size))
ddf0322d
JA
5703 return -EFAULT;
5704
5705 p->bgid = READ_ONCE(sqe->buf_group);
5706 tmp = READ_ONCE(sqe->off);
5707 if (tmp > USHRT_MAX)
5708 return -E2BIG;
5709 p->bid = tmp;
5710 return 0;
5711}
5712
cc3cec83
JA
5713static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
5714{
5715 struct io_buffer *buf;
5716 struct page *page;
5717 int bufs_in_page;
5718
5719 /*
 5720	 * Completions that don't happen inline (e.g. not under uring_lock) will
5721 * add to ->io_buffers_comp. If we don't have any free buffers, check
5722 * the completion list and splice those entries first.
5723 */
5724 if (!list_empty_careful(&ctx->io_buffers_comp)) {
5725 spin_lock(&ctx->completion_lock);
5726 if (!list_empty(&ctx->io_buffers_comp)) {
5727 list_splice_init(&ctx->io_buffers_comp,
5728 &ctx->io_buffers_cache);
5729 spin_unlock(&ctx->completion_lock);
5730 return 0;
5731 }
5732 spin_unlock(&ctx->completion_lock);
5733 }
5734
5735 /*
5736 * No free buffers and no completion entries either. Allocate a new
5737 * page worth of buffer entries and add those to our freelist.
5738 */
5739 page = alloc_page(GFP_KERNEL_ACCOUNT);
5740 if (!page)
5741 return -ENOMEM;
5742
5743 list_add(&page->lru, &ctx->io_buffers_pages);
5744
5745 buf = page_address(page);
5746 bufs_in_page = PAGE_SIZE / sizeof(*buf);
5747 while (bufs_in_page) {
5748 list_add_tail(&buf->list, &ctx->io_buffers_cache);
5749 buf++;
5750 bufs_in_page--;
5751 }
5752
5753 return 0;
5754}
5755
5756static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
dbc7d452 5757 struct io_buffer_list *bl)
ddf0322d
JA
5758{
5759 struct io_buffer *buf;
5760 u64 addr = pbuf->addr;
5761 int i, bid = pbuf->bid;
5762
5763 for (i = 0; i < pbuf->nbufs; i++) {
cc3cec83
JA
5764 if (list_empty(&ctx->io_buffers_cache) &&
5765 io_refill_buffer_cache(ctx))
ddf0322d 5766 break;
cc3cec83
JA
5767 buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
5768 list);
dbc7d452 5769 list_move_tail(&buf->list, &bl->buf_list);
ddf0322d 5770 buf->addr = addr;
d1f82808 5771 buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
ddf0322d 5772 buf->bid = bid;
b1c62645 5773 buf->bgid = pbuf->bgid;
ddf0322d
JA
5774 addr += pbuf->len;
5775 bid++;
f240762f 5776 cond_resched();
ddf0322d
JA
5777 }
5778
dbc7d452 5779 return i ? 0 : -ENOMEM;
ddf0322d
JA
5780}
5781
9cfc7e94
JA
5782static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
5783{
5784 int i;
5785
5786 ctx->io_bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list),
5787 GFP_KERNEL);
5788 if (!ctx->io_bl)
5789 return -ENOMEM;
5790
5791 for (i = 0; i < BGID_ARRAY; i++) {
5792 INIT_LIST_HEAD(&ctx->io_bl[i].buf_list);
5793 ctx->io_bl[i].bgid = i;
5794 }
5795
5796 return 0;
5797}
5798
889fca73 5799static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
ddf0322d
JA
5800{
5801 struct io_provide_buf *p = &req->pbuf;
5802 struct io_ring_ctx *ctx = req->ctx;
dbc7d452 5803 struct io_buffer_list *bl;
ddf0322d
JA
5804 int ret = 0;
5805
f8929630 5806 io_ring_submit_lock(ctx, issue_flags);
ddf0322d 5807
9cfc7e94
JA
5808 if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
5809 ret = io_init_bl_list(ctx);
5810 if (ret)
5811 goto err;
5812 }
ddf0322d 5813
dbc7d452
JA
5814 bl = io_buffer_get_list(ctx, p->bgid);
5815 if (unlikely(!bl)) {
c7fb1942 5816 bl = kzalloc(sizeof(*bl), GFP_KERNEL);
dbc7d452
JA
5817 if (!bl) {
5818 ret = -ENOMEM;
5819 goto err;
5820 }
1d0dbbfa 5821 INIT_LIST_HEAD(&bl->buf_list);
9cfc7e94
JA
5822 ret = io_buffer_add_list(ctx, bl, p->bgid);
5823 if (ret) {
5824 kfree(bl);
5825 goto err;
5826 }
ddf0322d 5827 }
c7fb1942
JA
5828 /* can't add buffers via this command for a mapped buffer ring */
5829 if (bl->buf_nr_pages) {
5830 ret = -EINVAL;
5831 goto err;
ddf0322d 5832 }
dbc7d452
JA
5833
5834 ret = io_add_buffers(ctx, p, bl);
5835err:
ddf0322d 5836 if (ret < 0)
93d2bcd2 5837 req_set_fail(req);
9fb8cb49
PB
5838 /* complete before unlock, IOPOLL may need the lock */
5839 __io_req_complete(req, issue_flags, ret, 0);
f8929630 5840 io_ring_submit_unlock(ctx, issue_flags);
ddf0322d 5841 return 0;
cebdb986
JA
5842}
5843
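/*
 * Editorial sketch (not part of this source): an application-side SQE for
 * IORING_OP_PROVIDE_BUFFERS, matching the fields io_provide_buffers_prep()
 * reads above (fd = number of buffers, addr = start of a contiguous region,
 * len = size of each buffer, buf_group = group ID, off = first buffer ID).
 * Ring setup and submission are omitted.
 */
#include <linux/io_uring.h>
#include <string.h>

static void prep_provide_buffers_sqe(struct io_uring_sqe *sqe, void *base,
				     unsigned int buf_len, unsigned int nbufs,
				     unsigned short bgid, unsigned short start_bid)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_PROVIDE_BUFFERS;
	sqe->fd = nbufs;			/* must be 1..USHRT_MAX, see the prep above */
	sqe->addr = (unsigned long)base;
	sqe->len = buf_len;
	sqe->buf_group = bgid;			/* selected later via IOSQE_BUFFER_SELECT */
	sqe->off = start_bid;
}
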
3e4827b0
JA
5844static int io_epoll_ctl_prep(struct io_kiocb *req,
5845 const struct io_uring_sqe *sqe)
5846{
5847#if defined(CONFIG_EPOLL)
73911426 5848 if (sqe->buf_index || sqe->splice_fd_in)
3232dd02 5849 return -EINVAL;
3e4827b0
JA
5850
5851 req->epoll.epfd = READ_ONCE(sqe->fd);
5852 req->epoll.op = READ_ONCE(sqe->len);
5853 req->epoll.fd = READ_ONCE(sqe->off);
5854
5855 if (ep_op_has_event(req->epoll.op)) {
5856 struct epoll_event __user *ev;
5857
5858 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
5859 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
5860 return -EFAULT;
5861 }
5862
5863 return 0;
5864#else
5865 return -EOPNOTSUPP;
5866#endif
5867}
5868
889fca73 5869static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
3e4827b0
JA
5870{
5871#if defined(CONFIG_EPOLL)
5872 struct io_epoll *ie = &req->epoll;
5873 int ret;
45d189c6 5874 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3e4827b0
JA
5875
5876 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
5877 if (force_nonblock && ret == -EAGAIN)
5878 return -EAGAIN;
5879
5880 if (ret < 0)
93d2bcd2 5881 req_set_fail(req);
889fca73 5882 __io_req_complete(req, issue_flags, ret, 0);
3e4827b0
JA
5883 return 0;
5884#else
5885 return -EOPNOTSUPP;
5886#endif
5887}
5888
c1ca757b
JA
5889static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5890{
5891#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
73911426 5892 if (sqe->buf_index || sqe->off || sqe->splice_fd_in)
3232dd02 5893 return -EINVAL;
c1ca757b
JA
5894
5895 req->madvise.addr = READ_ONCE(sqe->addr);
5896 req->madvise.len = READ_ONCE(sqe->len);
5897 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
5898 return 0;
5899#else
5900 return -EOPNOTSUPP;
5901#endif
5902}
5903
45d189c6 5904static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
c1ca757b
JA
5905{
5906#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
5907 struct io_madvise *ma = &req->madvise;
5908 int ret;
5909
45d189c6 5910 if (issue_flags & IO_URING_F_NONBLOCK)
c1ca757b
JA
5911 return -EAGAIN;
5912
0726b01e 5913 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
e1e16097 5914 io_req_complete(req, ret);
c1ca757b
JA
5915 return 0;
5916#else
5917 return -EOPNOTSUPP;
5918#endif
5919}
5920
4840e418
JA
5921static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5922{
73911426 5923 if (sqe->buf_index || sqe->addr || sqe->splice_fd_in)
3232dd02 5924 return -EINVAL;
4840e418
JA
5925
5926 req->fadvise.offset = READ_ONCE(sqe->off);
5927 req->fadvise.len = READ_ONCE(sqe->len);
5928 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
5929 return 0;
5930}
5931
45d189c6 5932static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
4840e418
JA
5933{
5934 struct io_fadvise *fa = &req->fadvise;
5935 int ret;
5936
45d189c6 5937 if (issue_flags & IO_URING_F_NONBLOCK) {
3e69426d
JA
5938 switch (fa->advice) {
5939 case POSIX_FADV_NORMAL:
5940 case POSIX_FADV_RANDOM:
5941 case POSIX_FADV_SEQUENTIAL:
5942 break;
5943 default:
5944 return -EAGAIN;
5945 }
5946 }
4840e418
JA
5947
5948 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
5949 if (ret < 0)
93d2bcd2 5950 req_set_fail(req);
0bdf3398 5951 __io_req_complete(req, issue_flags, ret, 0);
4840e418
JA
5952 return 0;
5953}
5954
eddc7ef5
JA
5955static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5956{
1b6fe6e0
SR
5957 const char __user *path;
5958
73911426 5959 if (sqe->buf_index || sqe->splice_fd_in)
eddc7ef5 5960 return -EINVAL;
9c280f90 5961 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 5962 return -EBADF;
eddc7ef5 5963
1d9e1288
BM
5964 req->statx.dfd = READ_ONCE(sqe->fd);
5965 req->statx.mask = READ_ONCE(sqe->len);
1b6fe6e0 5966 path = u64_to_user_ptr(READ_ONCE(sqe->addr));
1d9e1288
BM
5967 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
5968 req->statx.flags = READ_ONCE(sqe->statx_flags);
eddc7ef5 5969
1b6fe6e0
SR
5970 req->statx.filename = getname_flags(path,
5971 getname_statx_lookup_flags(req->statx.flags),
5972 NULL);
5973
5974 if (IS_ERR(req->statx.filename)) {
5975 int ret = PTR_ERR(req->statx.filename);
5976
5977 req->statx.filename = NULL;
5978 return ret;
5979 }
5980
5981 req->flags |= REQ_F_NEED_CLEANUP;
eddc7ef5
JA
5982 return 0;
5983}
5984
45d189c6 5985static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
eddc7ef5 5986{
1d9e1288 5987 struct io_statx *ctx = &req->statx;
eddc7ef5
JA
5988 int ret;
5989
59d70013 5990 if (issue_flags & IO_URING_F_NONBLOCK)
eddc7ef5
JA
5991 return -EAGAIN;
5992
e62753e4
BM
5993 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
5994 ctx->buffer);
e1e16097 5995 io_req_complete(req, ret);
eddc7ef5
JA
5996 return 0;
5997}
5998
b5dba59e
JA
5999static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
6000{
a7c41b46 6001 if (sqe->off || sqe->addr || sqe->len || sqe->buf_index)
b5dba59e 6002 return -EINVAL;
9c280f90 6003 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 6004 return -EBADF;
b5dba59e
JA
6005
6006 req->close.fd = READ_ONCE(sqe->fd);
7df778be 6007 req->close.file_slot = READ_ONCE(sqe->file_index);
a7c41b46
XW
6008 req->close.flags = READ_ONCE(sqe->close_flags);
6009 if (req->close.flags & ~IORING_CLOSE_FD_AND_FILE_SLOT)
6010 return -EINVAL;
6011 if (!(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT) &&
6012 req->close.file_slot && req->close.fd)
7df778be
PB
6013 return -EINVAL;
6014
b5dba59e 6015 return 0;
b5dba59e
JA
6016}
6017
889fca73 6018static int io_close(struct io_kiocb *req, unsigned int issue_flags)
b5dba59e 6019{
9eac1904 6020 struct files_struct *files = current->files;
3af73b28 6021 struct io_close *close = &req->close;
9eac1904 6022 struct fdtable *fdt;
40a19260 6023 struct file *file;
a1fde923 6024 int ret = -EBADF;
b5dba59e 6025
7df778be
PB
6026 if (req->close.file_slot) {
6027 ret = io_close_fixed(req, issue_flags);
a7c41b46
XW
6028 if (ret || !(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT))
6029 goto err;
7df778be
PB
6030 }
6031
9eac1904
JA
6032 spin_lock(&files->file_lock);
6033 fdt = files_fdtable(files);
6034 if (close->fd >= fdt->max_fds) {
6035 spin_unlock(&files->file_lock);
6036 goto err;
6037 }
0bf1dbee
CH
6038 file = rcu_dereference_protected(fdt->fd[close->fd],
6039 lockdep_is_held(&files->file_lock));
a1fde923 6040 if (!file || file->f_op == &io_uring_fops) {
9eac1904 6041 spin_unlock(&files->file_lock);
9eac1904 6042 goto err;
3af73b28 6043 }
b5dba59e
JA
6044
6045 /* if the file has a flush method, be safe and punt to async */
45d189c6 6046 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
9eac1904 6047 spin_unlock(&files->file_lock);
0bf0eefd 6048 return -EAGAIN;
a2100672 6049 }
b5dba59e 6050
6319194e 6051 file = __close_fd_get_file(close->fd);
9eac1904 6052 spin_unlock(&files->file_lock);
6319194e 6053 if (!file)
9eac1904 6054 goto err;
9eac1904 6055
3af73b28 6056 /* No ->flush() or already async, safely close from here */
9eac1904
JA
6057 ret = filp_close(file, current->files);
6058err:
3af73b28 6059 if (ret < 0)
93d2bcd2 6060 req_set_fail(req);
889fca73 6061 __io_req_complete(req, issue_flags, ret, 0);
1a417f4e 6062 return 0;
b5dba59e
JA
6063}
6064
1155c76a 6065static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5d17b4a4 6066{
73911426 6067 if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
5d17b4a4
JA
6068 return -EINVAL;
6069
8ed8d3c3
JA
6070 req->sync.off = READ_ONCE(sqe->off);
6071 req->sync.len = READ_ONCE(sqe->len);
6072 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
8ed8d3c3
JA
6073 return 0;
6074}
6075
45d189c6 6076static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 6077{
8ed8d3c3
JA
6078 int ret;
6079
ac45abc0 6080 /* sync_file_range always requires a blocking context */
45d189c6 6081 if (issue_flags & IO_URING_F_NONBLOCK)
ac45abc0
PB
6082 return -EAGAIN;
6083
9adbd45d 6084 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
8ed8d3c3 6085 req->sync.flags);
e1e16097 6086 io_req_complete(req, ret);
5d17b4a4
JA
6087 return 0;
6088}
6089
469956e8 6090#if defined(CONFIG_NET)
1151a7cc
JA
6091static int io_shutdown_prep(struct io_kiocb *req,
6092 const struct io_uring_sqe *sqe)
6093{
6094 if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
6095 sqe->buf_index || sqe->splice_fd_in))
6096 return -EINVAL;
6097
6098 req->shutdown.how = READ_ONCE(sqe->len);
6099 return 0;
6100}
6101
6102static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
6103{
6104 struct socket *sock;
6105 int ret;
6106
6107 if (issue_flags & IO_URING_F_NONBLOCK)
6108 return -EAGAIN;
6109
6110 sock = sock_from_file(req->file);
6111 if (unlikely(!sock))
6112 return -ENOTSOCK;
6113
6114 ret = __sys_shutdown_sock(sock, req->shutdown.how);
6115 io_req_complete(req, ret);
6116 return 0;
6117}
6118
4c3c0943
JA
6119static bool io_net_retry(struct socket *sock, int flags)
6120{
6121 if (!(flags & MSG_WAITALL))
6122 return false;
6123 return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
6124}
6125
02d27d89
PB
6126static int io_setup_async_msg(struct io_kiocb *req,
6127 struct io_async_msghdr *kmsg)
6128{
e8c2bc1f
JA
6129 struct io_async_msghdr *async_msg = req->async_data;
6130
6131 if (async_msg)
02d27d89 6132 return -EAGAIN;
e8c2bc1f 6133 if (io_alloc_async_data(req)) {
257e84a5 6134 kfree(kmsg->free_iov);
02d27d89
PB
6135 return -ENOMEM;
6136 }
e8c2bc1f 6137 async_msg = req->async_data;
02d27d89 6138 req->flags |= REQ_F_NEED_CLEANUP;
e8c2bc1f 6139 memcpy(async_msg, kmsg, sizeof(*kmsg));
2a780802 6140 async_msg->msg.msg_name = &async_msg->addr;
257e84a5
PB
 6141	/* if we're using fast_iov, set it to the new one */
6142 if (!async_msg->free_iov)
6143 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
6144
02d27d89
PB
6145 return -EAGAIN;
6146}
6147
2ae523ed
PB
6148static int io_sendmsg_copy_hdr(struct io_kiocb *req,
6149 struct io_async_msghdr *iomsg)
6150{
2ae523ed 6151 iomsg->msg.msg_name = &iomsg->addr;
257e84a5 6152 iomsg->free_iov = iomsg->fast_iov;
2ae523ed 6153 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
257e84a5 6154 req->sr_msg.msg_flags, &iomsg->free_iov);
2ae523ed
PB
6155}
6156
93642ef8
PB
6157static int io_sendmsg_prep_async(struct io_kiocb *req)
6158{
6159 int ret;
6160
93642ef8
PB
6161 ret = io_sendmsg_copy_hdr(req, req->async_data);
6162 if (!ret)
6163 req->flags |= REQ_F_NEED_CLEANUP;
6164 return ret;
6165}
6166
3529d8c2 6167static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
03b1230c 6168{
e47293fd 6169 struct io_sr_msg *sr = &req->sr_msg;
03b1230c 6170
0455d4cc 6171 if (unlikely(sqe->file_index))
d2b6f48b 6172 return -EINVAL;
588faa1e
JA
6173 if (unlikely(sqe->addr2 || sqe->file_index))
6174 return -EINVAL;
d2b6f48b 6175
270a5940 6176 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
fddaface 6177 sr->len = READ_ONCE(sqe->len);
0455d4cc
JA
6178 sr->flags = READ_ONCE(sqe->addr2);
6179 if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
6180 return -EINVAL;
04411806
PB
6181 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
6182 if (sr->msg_flags & MSG_DONTWAIT)
6183 req->flags |= REQ_F_NOWAIT;
3529d8c2 6184
d8768362
JA
6185#ifdef CONFIG_COMPAT
6186 if (req->ctx->compat)
6187 sr->msg_flags |= MSG_CMSG_COMPAT;
6188#endif
4c3c0943 6189 sr->done_io = 0;
93642ef8 6190 return 0;
03b1230c
JA
6191}
6192
889fca73 6193static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
aa1fa28f 6194{
6b754c8b 6195 struct io_async_msghdr iomsg, *kmsg;
4c3c0943 6196 struct io_sr_msg *sr = &req->sr_msg;
0fa03c62 6197 struct socket *sock;
7a7cacba 6198 unsigned flags;
0031275d 6199 int min_ret = 0;
0fa03c62
JA
6200 int ret;
6201
dba4a925 6202 sock = sock_from_file(req->file);
7a7cacba 6203 if (unlikely(!sock))
dba4a925 6204 return -ENOTSOCK;
3529d8c2 6205
d886e185
PB
6206 if (req_has_async_data(req)) {
6207 kmsg = req->async_data;
6208 } else {
7a7cacba
PB
6209 ret = io_sendmsg_copy_hdr(req, &iomsg);
6210 if (ret)
6211 return ret;
6212 kmsg = &iomsg;
0fa03c62 6213 }
0fa03c62 6214
0455d4cc
JA
6215 if (!(req->flags & REQ_F_POLLED) &&
6216 (sr->flags & IORING_RECVSEND_POLL_FIRST))
6217 return io_setup_async_msg(req, kmsg);
6218
0a352aaa 6219 flags = sr->msg_flags;
04411806 6220 if (issue_flags & IO_URING_F_NONBLOCK)
7a7cacba 6221 flags |= MSG_DONTWAIT;
0031275d
SM
6222 if (flags & MSG_WAITALL)
6223 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
6224
7a7cacba 6225 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
0fa03c62 6226
7297ce3d
PB
6227 if (ret < min_ret) {
6228 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
6229 return io_setup_async_msg(req, kmsg);
6230 if (ret == -ERESTARTSYS)
6231 ret = -EINTR;
4c3c0943
JA
6232 if (ret > 0 && io_net_retry(sock, flags)) {
6233 sr->done_io += ret;
6234 req->flags |= REQ_F_PARTIAL_IO;
6235 return io_setup_async_msg(req, kmsg);
6236 }
7297ce3d
PB
6237 req_set_fail(req);
6238 }
257e84a5
PB
6239 /* fast path, check for non-NULL to avoid function call */
6240 if (kmsg->free_iov)
6241 kfree(kmsg->free_iov);
99bc4c38 6242 req->flags &= ~REQ_F_NEED_CLEANUP;
4c3c0943
JA
6243 if (ret >= 0)
6244 ret += sr->done_io;
6245 else if (sr->done_io)
6246 ret = sr->done_io;
889fca73 6247 __io_req_complete(req, issue_flags, ret, 0);
5d17b4a4 6248 return 0;
03b1230c 6249}
aa1fa28f 6250
889fca73 6251static int io_send(struct io_kiocb *req, unsigned int issue_flags)
fddaface 6252{
7a7cacba
PB
6253 struct io_sr_msg *sr = &req->sr_msg;
6254 struct msghdr msg;
6255 struct iovec iov;
fddaface 6256 struct socket *sock;
7a7cacba 6257 unsigned flags;
0031275d 6258 int min_ret = 0;
fddaface
JA
6259 int ret;
6260
0455d4cc
JA
6261 if (!(req->flags & REQ_F_POLLED) &&
6262 (sr->flags & IORING_RECVSEND_POLL_FIRST))
6263 return -EAGAIN;
6264
dba4a925 6265 sock = sock_from_file(req->file);
7a7cacba 6266 if (unlikely(!sock))
dba4a925 6267 return -ENOTSOCK;
fddaface 6268
7a7cacba
PB
6269 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
6270 if (unlikely(ret))
14db8411 6271 return ret;
fddaface 6272
7a7cacba
PB
6273 msg.msg_name = NULL;
6274 msg.msg_control = NULL;
6275 msg.msg_controllen = 0;
6276 msg.msg_namelen = 0;
fddaface 6277
0a352aaa 6278 flags = sr->msg_flags;
04411806 6279 if (issue_flags & IO_URING_F_NONBLOCK)
7a7cacba 6280 flags |= MSG_DONTWAIT;
0031275d
SM
6281 if (flags & MSG_WAITALL)
6282 min_ret = iov_iter_count(&msg.msg_iter);
6283
7a7cacba
PB
6284 msg.msg_flags = flags;
6285 ret = sock_sendmsg(sock, &msg);
7297ce3d
PB
6286 if (ret < min_ret) {
6287 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
6288 return -EAGAIN;
6289 if (ret == -ERESTARTSYS)
6290 ret = -EINTR;
4c3c0943
JA
6291 if (ret > 0 && io_net_retry(sock, flags)) {
6292 sr->len -= ret;
6293 sr->buf += ret;
6294 sr->done_io += ret;
6295 req->flags |= REQ_F_PARTIAL_IO;
6296 return -EAGAIN;
6297 }
93d2bcd2 6298 req_set_fail(req);
7297ce3d 6299 }
4c3c0943
JA
6300 if (ret >= 0)
6301 ret += sr->done_io;
6302 else if (sr->done_io)
6303 ret = sr->done_io;
889fca73 6304 __io_req_complete(req, issue_flags, ret, 0);
fddaface 6305 return 0;
fddaface
JA
6306}
6307
1400e697
PB
6308static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
6309 struct io_async_msghdr *iomsg)
52de1fe1
JA
6310{
6311 struct io_sr_msg *sr = &req->sr_msg;
6312 struct iovec __user *uiov;
6313 size_t iov_len;
6314 int ret;
6315
1400e697
PB
6316 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
6317 &iomsg->uaddr, &uiov, &iov_len);
52de1fe1
JA
6318 if (ret)
6319 return ret;
6320
6321 if (req->flags & REQ_F_BUFFER_SELECT) {
6322 if (iov_len > 1)
6323 return -EINVAL;
5476dfed 6324 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
52de1fe1 6325 return -EFAULT;
5476dfed 6326 sr->len = iomsg->fast_iov[0].iov_len;
257e84a5 6327 iomsg->free_iov = NULL;
52de1fe1 6328 } else {
257e84a5 6329 iomsg->free_iov = iomsg->fast_iov;
89cd35c5 6330 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
257e84a5 6331 &iomsg->free_iov, &iomsg->msg.msg_iter,
89cd35c5 6332 false);
52de1fe1
JA
6333 if (ret > 0)
6334 ret = 0;
6335 }
6336
6337 return ret;
6338}
6339
6340#ifdef CONFIG_COMPAT
6341static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
1400e697 6342 struct io_async_msghdr *iomsg)
52de1fe1 6343{
52de1fe1
JA
6344 struct io_sr_msg *sr = &req->sr_msg;
6345 struct compat_iovec __user *uiov;
6346 compat_uptr_t ptr;
6347 compat_size_t len;
6348 int ret;
6349
4af3417a
PB
6350 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
6351 &ptr, &len);
52de1fe1
JA
6352 if (ret)
6353 return ret;
6354
6355 uiov = compat_ptr(ptr);
6356 if (req->flags & REQ_F_BUFFER_SELECT) {
6357 compat_ssize_t clen;
6358
6359 if (len > 1)
6360 return -EINVAL;
6361 if (!access_ok(uiov, sizeof(*uiov)))
6362 return -EFAULT;
6363 if (__get_user(clen, &uiov->iov_len))
6364 return -EFAULT;
6365 if (clen < 0)
6366 return -EINVAL;
2d280bc8 6367 sr->len = clen;
257e84a5 6368 iomsg->free_iov = NULL;
52de1fe1 6369 } else {
257e84a5 6370 iomsg->free_iov = iomsg->fast_iov;
89cd35c5 6371 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
257e84a5 6372 UIO_FASTIOV, &iomsg->free_iov,
89cd35c5 6373 &iomsg->msg.msg_iter, true);
52de1fe1
JA
6374 if (ret < 0)
6375 return ret;
6376 }
6377
6378 return 0;
6379}
6380#endif
6381
1400e697
PB
6382static int io_recvmsg_copy_hdr(struct io_kiocb *req,
6383 struct io_async_msghdr *iomsg)
52de1fe1 6384{
1400e697 6385 iomsg->msg.msg_name = &iomsg->addr;
52de1fe1
JA
6386
6387#ifdef CONFIG_COMPAT
6388 if (req->ctx->compat)
1400e697 6389 return __io_compat_recvmsg_copy_hdr(req, iomsg);
fddaface 6390#endif
52de1fe1 6391
1400e697 6392 return __io_recvmsg_copy_hdr(req, iomsg);
52de1fe1
JA
6393}
6394
93642ef8 6395static int io_recvmsg_prep_async(struct io_kiocb *req)
aa1fa28f 6396{
99bc4c38 6397 int ret;
3529d8c2 6398
93642ef8
PB
6399 ret = io_recvmsg_copy_hdr(req, req->async_data);
6400 if (!ret)
6401 req->flags |= REQ_F_NEED_CLEANUP;
6402 return ret;
6403}
6404
6405static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
6406{
6407 struct io_sr_msg *sr = &req->sr_msg;
6408
0455d4cc 6409 if (unlikely(sqe->file_index))
d2b6f48b 6410 return -EINVAL;
5a1e99b6
JA
6411 if (unlikely(sqe->addr2 || sqe->file_index))
6412 return -EINVAL;
d2b6f48b 6413
270a5940 6414 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
0b7b21e4 6415 sr->len = READ_ONCE(sqe->len);
0455d4cc
JA
6416 sr->flags = READ_ONCE(sqe->addr2);
6417 if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
6418 return -EINVAL;
04411806
PB
6419 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
6420 if (sr->msg_flags & MSG_DONTWAIT)
6421 req->flags |= REQ_F_NOWAIT;
06b76d44 6422
d8768362
JA
6423#ifdef CONFIG_COMPAT
6424 if (req->ctx->compat)
6425 sr->msg_flags |= MSG_CMSG_COMPAT;
6426#endif
7ba89d2a 6427 sr->done_io = 0;
93642ef8 6428 return 0;
aa1fa28f
JA
6429}
6430
889fca73 6431static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
aa1fa28f 6432{
6b754c8b 6433 struct io_async_msghdr iomsg, *kmsg;
7ba89d2a 6434 struct io_sr_msg *sr = &req->sr_msg;
03b1230c 6435 struct socket *sock;
f548a12e 6436 unsigned int cflags;
7a7cacba 6437 unsigned flags;
d1fd1c20 6438 int ret, min_ret = 0;
45d189c6 6439 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
03b1230c 6440
dba4a925 6441 sock = sock_from_file(req->file);
7a7cacba 6442 if (unlikely(!sock))
dba4a925 6443 return -ENOTSOCK;
3529d8c2 6444
d886e185
PB
6445 if (req_has_async_data(req)) {
6446 kmsg = req->async_data;
6447 } else {
7a7cacba
PB
6448 ret = io_recvmsg_copy_hdr(req, &iomsg);
6449 if (ret)
681fda8d 6450 return ret;
7a7cacba
PB
6451 kmsg = &iomsg;
6452 }
03b1230c 6453
0455d4cc
JA
6454 if (!(req->flags & REQ_F_POLLED) &&
6455 (sr->flags & IORING_RECVSEND_POLL_FIRST))
6456 return io_setup_async_msg(req, kmsg);
6457
b66e65f4 6458 if (io_do_buffer_select(req)) {
c54d52c2
JA
6459 void __user *buf;
6460
4e906702 6461 buf = io_buffer_select(req, &sr->len, issue_flags);
984824db
CH
6462 if (!buf)
6463 return -ENOBUFS;
c54d52c2 6464 kmsg->fast_iov[0].iov_base = buf;
0a352aaa
JA
6465 kmsg->fast_iov[0].iov_len = sr->len;
6466 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
6467 sr->len);
7a7cacba 6468 }
52de1fe1 6469
0a352aaa 6470 flags = sr->msg_flags;
04411806 6471 if (force_nonblock)
7a7cacba 6472 flags |= MSG_DONTWAIT;
0031275d
SM
6473 if (flags & MSG_WAITALL)
6474 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
6475
f548a12e 6476 kmsg->msg.msg_get_inq = 1;
0a352aaa 6477 ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg, kmsg->uaddr, flags);
7297ce3d
PB
6478 if (ret < min_ret) {
6479 if (ret == -EAGAIN && force_nonblock)
6480 return io_setup_async_msg(req, kmsg);
6481 if (ret == -ERESTARTSYS)
6482 ret = -EINTR;
7ba89d2a
JA
6483 if (ret > 0 && io_net_retry(sock, flags)) {
6484 sr->done_io += ret;
8a3e8ee5 6485 req->flags |= REQ_F_PARTIAL_IO;
7ba89d2a
JA
6486 return io_setup_async_msg(req, kmsg);
6487 }
7297ce3d
PB
6488 req_set_fail(req);
6489 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
6490 req_set_fail(req);
6491 }
03b1230c 6492
257e84a5
PB
6493 /* fast path, check for non-NULL to avoid function call */
6494 if (kmsg->free_iov)
6495 kfree(kmsg->free_iov);
99bc4c38 6496 req->flags &= ~REQ_F_NEED_CLEANUP;
7ba89d2a
JA
6497 if (ret >= 0)
6498 ret += sr->done_io;
6499 else if (sr->done_io)
6500 ret = sr->done_io;
f548a12e
JA
6501 cflags = io_put_kbuf(req, issue_flags);
6502 if (kmsg->msg.msg_inq)
6503 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
6504 __io_req_complete(req, issue_flags, ret, cflags);
03b1230c 6505 return 0;
0fa03c62 6506}
5d17b4a4 6507
889fca73 6508static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
fddaface 6509{
7a7cacba
PB
6510 struct io_sr_msg *sr = &req->sr_msg;
6511 struct msghdr msg;
fddaface 6512 struct socket *sock;
7a7cacba 6513 struct iovec iov;
f548a12e 6514 unsigned int cflags;
7a7cacba 6515 unsigned flags;
d1fd1c20 6516 int ret, min_ret = 0;
45d189c6 6517 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
fddaface 6518
0455d4cc
JA
6519 if (!(req->flags & REQ_F_POLLED) &&
6520 (sr->flags & IORING_RECVSEND_POLL_FIRST))
6521 return -EAGAIN;
6522
dba4a925 6523 sock = sock_from_file(req->file);
7a7cacba 6524 if (unlikely(!sock))
dba4a925 6525 return -ENOTSOCK;
fddaface 6526
b66e65f4 6527 if (io_do_buffer_select(req)) {
c54d52c2
JA
6528 void __user *buf;
6529
4e906702 6530 buf = io_buffer_select(req, &sr->len, issue_flags);
984824db
CH
6531 if (!buf)
6532 return -ENOBUFS;
c54d52c2 6533 sr->buf = buf;
bc02ef33 6534 }
bcda7baa 6535
c54d52c2 6536 ret = import_single_range(READ, sr->buf, sr->len, &iov, &msg.msg_iter);
14c32eee
PB
6537 if (unlikely(ret))
6538 goto out_free;
fddaface 6539
7a7cacba 6540 msg.msg_name = NULL;
f548a12e 6541 msg.msg_namelen = 0;
7a7cacba 6542 msg.msg_control = NULL;
f548a12e
JA
6543 msg.msg_get_inq = 1;
6544 msg.msg_flags = 0;
7a7cacba 6545 msg.msg_controllen = 0;
7a7cacba 6546 msg.msg_iocb = NULL;
fddaface 6547
0a352aaa 6548 flags = sr->msg_flags;
04411806 6549 if (force_nonblock)
7a7cacba 6550 flags |= MSG_DONTWAIT;
0031275d
SM
6551 if (flags & MSG_WAITALL)
6552 min_ret = iov_iter_count(&msg.msg_iter);
6553
7a7cacba 6554 ret = sock_recvmsg(sock, &msg, flags);
7297ce3d
PB
6555 if (ret < min_ret) {
6556 if (ret == -EAGAIN && force_nonblock)
6557 return -EAGAIN;
6558 if (ret == -ERESTARTSYS)
6559 ret = -EINTR;
7ba89d2a
JA
6560 if (ret > 0 && io_net_retry(sock, flags)) {
6561 sr->len -= ret;
6562 sr->buf += ret;
6563 sr->done_io += ret;
8a3e8ee5 6564 req->flags |= REQ_F_PARTIAL_IO;
7ba89d2a
JA
6565 return -EAGAIN;
6566 }
7297ce3d
PB
6567 req_set_fail(req);
6568 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
0d7c1153 6569out_free:
93d2bcd2 6570 req_set_fail(req);
7297ce3d 6571 }
cc3cec83 6572
7ba89d2a
JA
6573 if (ret >= 0)
6574 ret += sr->done_io;
6575 else if (sr->done_io)
6576 ret = sr->done_io;
f548a12e
JA
6577 cflags = io_put_kbuf(req, issue_flags);
6578 if (msg.msg_inq)
6579 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
6580 __io_req_complete(req, issue_flags, ret, cflags);
fddaface 6581 return 0;
fddaface
JA
6582}
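
/*
 * Editorial sketch (not part of this source): a recv SQE that consumes the
 * provided buffers registered with IORING_OP_PROVIDE_BUFFERS. With
 * IOSQE_BUFFER_SELECT set, io_recv() above picks a buffer from sqe->buf_group
 * at issue time; the chosen buffer ID comes back in cqe->flags when
 * IORING_CQE_F_BUFFER is set. In this snapshot the prep also accepts
 * IORING_RECVSEND_POLL_FIRST via sqe->addr2. Ring setup/submission omitted.
 */
#include <linux/io_uring.h>
#include <string.h>

static void prep_recv_select_sqe(struct io_uring_sqe *sqe, int sockfd,
				 unsigned short bgid, __u64 user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_RECV;
	sqe->fd = sockfd;
	sqe->flags = IOSQE_BUFFER_SELECT;
	sqe->buf_group = bgid;
	sqe->len = 0;		/* 0: bounded by the selected buffer's size */
	sqe->user_data = user_data;
}

static unsigned int cqe_buffer_id(const struct io_uring_cqe *cqe)
{
	/* valid only when cqe->flags & IORING_CQE_F_BUFFER */
	return cqe->flags >> IORING_CQE_BUFFER_SHIFT;
}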
6583
3529d8c2 6584static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
17f2fe35 6585{
8ed8d3c3 6586 struct io_accept *accept = &req->accept;
4e86a2c9 6587 unsigned flags;
8ed8d3c3 6588
73911426 6589 if (sqe->len || sqe->buf_index)
17f2fe35
JA
6590 return -EINVAL;
6591
d55e5f5b
JA
6592 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
6593 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
8ed8d3c3 6594 accept->flags = READ_ONCE(sqe->accept_flags);
09952e3e 6595 accept->nofile = rlimit(RLIMIT_NOFILE);
4e86a2c9
HX
6596 flags = READ_ONCE(sqe->ioprio);
6597 if (flags & ~IORING_ACCEPT_MULTISHOT)
6598 return -EINVAL;
a7083ad5 6599
aaa4db12 6600 accept->file_slot = READ_ONCE(sqe->file_index);
4e86a2c9
HX
6601 if (accept->file_slot) {
6602 if (accept->flags & SOCK_CLOEXEC)
6603 return -EINVAL;
6604 if (flags & IORING_ACCEPT_MULTISHOT &&
6605 accept->file_slot != IORING_FILE_INDEX_ALLOC)
6606 return -EINVAL;
6607 }
a7083ad5
PB
6608 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
6609 return -EINVAL;
6610 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
6611 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
4e86a2c9
HX
6612 if (flags & IORING_ACCEPT_MULTISHOT)
6613 req->flags |= REQ_F_APOLL_MULTISHOT;
8ed8d3c3 6614 return 0;
8ed8d3c3 6615}
17f2fe35 6616
889fca73 6617static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 6618{
4e86a2c9 6619 struct io_ring_ctx *ctx = req->ctx;
8ed8d3c3 6620 struct io_accept *accept = &req->accept;
45d189c6 6621 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ac45abc0 6622 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
aaa4db12 6623 bool fixed = !!accept->file_slot;
a7083ad5
PB
6624 struct file *file;
6625 int ret, fd;
8ed8d3c3 6626
4e86a2c9 6627retry:
aaa4db12
PB
6628 if (!fixed) {
6629 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
6630 if (unlikely(fd < 0))
6631 return fd;
6632 }
a7083ad5
PB
6633 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
6634 accept->flags);
6635 if (IS_ERR(file)) {
aaa4db12
PB
6636 if (!fixed)
6637 put_unused_fd(fd);
a7083ad5 6638 ret = PTR_ERR(file);
4e86a2c9
HX
6639 if (ret == -EAGAIN && force_nonblock) {
6640 /*
6641 * if it's multishot and polled, we don't need to
6642 * return EAGAIN to arm the poll infra since it
6643 * has already been done
6644 */
6645 if ((req->flags & IO_APOLL_MULTI_POLLED) ==
6646 IO_APOLL_MULTI_POLLED)
6647 ret = 0;
6648 return ret;
6649 }
ac45abc0
PB
6650 if (ret == -ERESTARTSYS)
6651 ret = -EINTR;
93d2bcd2 6652 req_set_fail(req);
aaa4db12 6653 } else if (!fixed) {
a7083ad5
PB
6654 fd_install(fd, file);
6655 ret = fd;
aaa4db12 6656 } else {
c30c3e00
JA
6657 ret = io_fixed_fd_install(req, issue_flags, file,
6658 accept->file_slot);
ac45abc0 6659 }
4e86a2c9
HX
6660
6661 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
6662 __io_req_complete(req, issue_flags, ret, 0);
6663 return 0;
6664 }
6665 if (ret >= 0) {
6666 bool filled;
6667
6668 spin_lock(&ctx->completion_lock);
6669 filled = io_fill_cqe_aux(ctx, req->cqe.user_data, ret,
6670 IORING_CQE_F_MORE);
6671 io_commit_cqring(ctx);
6672 spin_unlock(&ctx->completion_lock);
6673 if (filled) {
6674 io_cqring_ev_posted(ctx);
6675 goto retry;
6676 }
6677 ret = -ECANCELED;
6678 }
6679
6680 return ret;
8ed8d3c3
JA
6681}
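
/*
 * Editorial sketch (not part of this source): an application-side multishot
 * accept SQE. io_accept_prep() above reads IORING_ACCEPT_MULTISHOT from
 * sqe->ioprio; each accepted connection then produces its own CQE (fd in
 * cqe->res), and IORING_CQE_F_MORE in cqe->flags signals that the request is
 * still armed. Peer-address collection and ring setup are omitted.
 */
#include <linux/io_uring.h>
#include <string.h>

static void prep_multishot_accept_sqe(struct io_uring_sqe *sqe, int listen_fd,
				      __u64 user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ACCEPT;
	sqe->fd = listen_fd;
	sqe->ioprio = IORING_ACCEPT_MULTISHOT;
	sqe->user_data = user_data;
	/* sqe->addr/addr2 stay 0: the peer address is not requested here */
}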
6682
1374e08e
JA
6683static int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
6684{
6685 struct io_socket *sock = &req->sock;
6686
ee692a21 6687 if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1374e08e
JA
6688 return -EINVAL;
6689
6690 sock->domain = READ_ONCE(sqe->fd);
6691 sock->type = READ_ONCE(sqe->off);
6692 sock->protocol = READ_ONCE(sqe->len);
6693 sock->file_slot = READ_ONCE(sqe->file_index);
6694 sock->nofile = rlimit(RLIMIT_NOFILE);
6695
6696 sock->flags = sock->type & ~SOCK_TYPE_MASK;
6697 if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
6698 return -EINVAL;
6699 if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
6700 return -EINVAL;
6701 return 0;
6702}
6703
6704static int io_socket(struct io_kiocb *req, unsigned int issue_flags)
6705{
6706 struct io_socket *sock = &req->sock;
6707 bool fixed = !!sock->file_slot;
6708 struct file *file;
6709 int ret, fd;
6710
6711 if (!fixed) {
6712 fd = __get_unused_fd_flags(sock->flags, sock->nofile);
6713 if (unlikely(fd < 0))
6714 return fd;
6715 }
6716 file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
6717 if (IS_ERR(file)) {
6718 if (!fixed)
6719 put_unused_fd(fd);
6720 ret = PTR_ERR(file);
6721 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
6722 return -EAGAIN;
6723 if (ret == -ERESTARTSYS)
6724 ret = -EINTR;
6725 req_set_fail(req);
6726 } else if (!fixed) {
6727 fd_install(fd, file);
6728 ret = fd;
6729 } else {
fa82dd10
JA
6730 ret = io_fixed_fd_install(req, issue_flags, file,
6731 sock->file_slot);
1374e08e
JA
6732 }
6733 __io_req_complete(req, issue_flags, ret, 0);
6734 return 0;
6735}
6736
93642ef8
PB
6737static int io_connect_prep_async(struct io_kiocb *req)
6738{
6739 struct io_async_connect *io = req->async_data;
6740 struct io_connect *conn = &req->connect;
6741
6742 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
6743}
6744
3529d8c2 6745static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f499a021 6746{
3529d8c2 6747 struct io_connect *conn = &req->connect;
f499a021 6748
73911426 6749 if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
3fbb51c1
JA
6750 return -EINVAL;
6751
3529d8c2
JA
6752 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
6753 conn->addr_len = READ_ONCE(sqe->addr2);
93642ef8 6754 return 0;
f499a021
JA
6755}
6756
889fca73 6757static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
f8e85cf2 6758{
e8c2bc1f 6759 struct io_async_connect __io, *io;
f8e85cf2 6760 unsigned file_flags;
3fbb51c1 6761 int ret;
45d189c6 6762 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
f8e85cf2 6763
d886e185 6764 if (req_has_async_data(req)) {
e8c2bc1f 6765 io = req->async_data;
f499a021 6766 } else {
3529d8c2
JA
6767 ret = move_addr_to_kernel(req->connect.addr,
6768 req->connect.addr_len,
e8c2bc1f 6769 &__io.address);
f499a021
JA
6770 if (ret)
6771 goto out;
6772 io = &__io;
6773 }
6774
3fbb51c1
JA
6775 file_flags = force_nonblock ? O_NONBLOCK : 0;
6776
e8c2bc1f 6777 ret = __sys_connect_file(req->file, &io->address,
3fbb51c1 6778 req->connect.addr_len, file_flags);
87f80d62 6779 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
d886e185 6780 if (req_has_async_data(req))
b7bb4f7d 6781 return -EAGAIN;
e8c2bc1f 6782 if (io_alloc_async_data(req)) {
f499a021
JA
6783 ret = -ENOMEM;
6784 goto out;
6785 }
e8c2bc1f 6786 memcpy(req->async_data, &__io, sizeof(__io));
f8e85cf2 6787 return -EAGAIN;
f499a021 6788 }
f8e85cf2
JA
6789 if (ret == -ERESTARTSYS)
6790 ret = -EINTR;
f499a021 6791out:
4e88d6e7 6792 if (ret < 0)
93d2bcd2 6793 req_set_fail(req);
889fca73 6794 __io_req_complete(req, issue_flags, ret, 0);
f8e85cf2 6795 return 0;
469956e8
Y
6796}
6797#else /* !CONFIG_NET */
99a10081
JA
6798#define IO_NETOP_FN(op) \
6799static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
6800{ \
6801 return -EOPNOTSUPP; \
6802}
6803
6804#define IO_NETOP_PREP(op) \
6805IO_NETOP_FN(op) \
6806static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
6807{ \
6808 return -EOPNOTSUPP; \
6809} \
6810
6811#define IO_NETOP_PREP_ASYNC(op) \
6812IO_NETOP_PREP(op) \
6813static int io_##op##_prep_async(struct io_kiocb *req) \
6814{ \
6815 return -EOPNOTSUPP; \
6816}
6817
6818IO_NETOP_PREP_ASYNC(sendmsg);
6819IO_NETOP_PREP_ASYNC(recvmsg);
6820IO_NETOP_PREP_ASYNC(connect);
6821IO_NETOP_PREP(accept);
1374e08e 6822IO_NETOP_PREP(socket);
1151a7cc 6823IO_NETOP_PREP(shutdown);
99a10081
JA
6824IO_NETOP_FN(send);
6825IO_NETOP_FN(recv);
469956e8 6826#endif /* CONFIG_NET */
f8e85cf2 6827
d7718a9d
JA
6828struct io_poll_table {
6829 struct poll_table_struct pt;
6830 struct io_kiocb *req;
68b11e8b 6831 int nr_entries;
d7718a9d
JA
6832 int error;
6833};
ce593a6c 6834
aa43477b 6835#define IO_POLL_CANCEL_FLAG BIT(31)
e2c0cb7c 6836#define IO_POLL_REF_MASK GENMASK(30, 0)
6d816e08 6837
aa43477b
PB
6838/*
6839 * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
 6840 * bump it and acquire ownership. Requests must not be modified without
 6841 * owning them; that prevents races when enqueueing task_work and between
 6842 * arming poll and wakeups.
6843 */
6844static inline bool io_poll_get_ownership(struct io_kiocb *req)
6845{
6846 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
d7718a9d
JA
6847}
6848
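/*
 * Editorial sketch (not part of this source): the ownership protocol the
 * comment above describes, reduced to standalone C11 atomics. Whoever bumps
 * the reference count from zero owns the request; later wakeups only add
 * references, and the owner drops them all at once and loops if more arrived
 * in the meantime (compare io_poll_check_events()).
 */
#include <stdatomic.h>
#include <stdbool.h>

#define POLL_REF_MASK	((1u << 31) - 1)	/* mirrors IO_POLL_REF_MASK */

static bool poll_get_ownership(atomic_uint *poll_refs)
{
	/* true only for the caller that saw the masked count at zero */
	return !(atomic_fetch_add(poll_refs, 1) & POLL_REF_MASK);
}

static bool poll_owner_must_loop(atomic_uint *poll_refs, unsigned int taken)
{
	/*
	 * Drop the references we consumed; a nonzero result means new wakeups
	 * (or a cancel flag) arrived while we were working.
	 */
	return atomic_fetch_sub(poll_refs, taken) - taken;
}
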
aa43477b 6849static void io_poll_mark_cancelled(struct io_kiocb *req)
74ce6ce4 6850{
aa43477b 6851 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
74ce6ce4
JA
6852}
6853
d4e7cd36 6854static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
18bceab1 6855{
e8c2bc1f 6856 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
d4e7cd36 6857 if (req->opcode == IORING_OP_POLL_ADD)
e8c2bc1f 6858 return req->async_data;
d4e7cd36
JA
6859 return req->apoll->double_poll;
6860}
6861
6862static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
6863{
6864 if (req->opcode == IORING_OP_POLL_ADD)
6865 return &req->poll;
6866 return &req->apoll->poll;
6867}
6868
5641897a 6869static void io_poll_req_insert(struct io_kiocb *req)
d4e7cd36 6870{
5641897a
PB
6871 struct io_ring_ctx *ctx = req->ctx;
6872 struct hlist_head *list;
18bceab1 6873
cef216fc 6874 list = &ctx->cancel_hash[hash_long(req->cqe.user_data, ctx->cancel_hash_bits)];
5641897a 6875 hlist_add_head(&req->hash_node, list);
18bceab1
JA
6876}
6877
5641897a
PB
6878static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
6879 wait_queue_func_t wake_func)
18bceab1 6880{
5641897a 6881 poll->head = NULL;
5641897a
PB
6882#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
6883 /* mask in events that we always want/need */
6884 poll->events = events | IO_POLL_UNMASK;
6885 INIT_LIST_HEAD(&poll->wait.entry);
6886 init_waitqueue_func_entry(&poll->wait, wake_func);
18bceab1
JA
6887}
6888
aa43477b 6889static inline void io_poll_remove_entry(struct io_poll_iocb *poll)
18bceab1 6890{
791f3465 6891 struct wait_queue_head *head = smp_load_acquire(&poll->head);
18bceab1 6892
791f3465
PB
6893 if (head) {
6894 spin_lock_irq(&head->lock);
6895 list_del_init(&poll->wait.entry);
6896 poll->head = NULL;
6897 spin_unlock_irq(&head->lock);
6898 }
aa43477b 6899}
18bceab1 6900
aa43477b
PB
6901static void io_poll_remove_entries(struct io_kiocb *req)
6902{
91eac1c6
JA
6903 /*
 6904	 * Nothing to do if neither of those flags is set. Avoid dipping
6905 * into the poll/apoll/double cachelines if we can.
6906 */
6907 if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
6908 return;
18bceab1 6909
791f3465
PB
6910 /*
6911 * While we hold the waitqueue lock and the waitqueue is nonempty,
6912 * wake_up_pollfree() will wait for us. However, taking the waitqueue
6913 * lock in the first place can race with the waitqueue being freed.
6914 *
6915 * We solve this as eventpoll does: by taking advantage of the fact that
6916 * all users of wake_up_pollfree() will RCU-delay the actual free. If
6917 * we enter rcu_read_lock() and see that the pointer to the queue is
6918 * non-NULL, we can then lock it without the memory being freed out from
6919 * under us.
6920 *
6921 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
6922 * case the caller deletes the entry from the queue, leaving it empty.
6923 * In that case, only RCU prevents the queue memory from being freed.
6924 */
6925 rcu_read_lock();
91eac1c6
JA
6926 if (req->flags & REQ_F_SINGLE_POLL)
6927 io_poll_remove_entry(io_poll_get_single(req));
6928 if (req->flags & REQ_F_DOUBLE_POLL)
6929 io_poll_remove_entry(io_poll_get_double(req));
791f3465 6930 rcu_read_unlock();
18bceab1
JA
6931}
6932
dbc2564c 6933static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags);
aa43477b
PB
6934/*
6935 * All poll tw should go through this. Checks for poll events, manages
6936 * references, does rewait, etc.
6937 *
 6938 * Returns a negative error on failure. >0 when no further action is
 6939 * required, i.e. either a spurious wakeup or a multishot CQE was posted.
cef216fc 6940 * 0 when we're done with the request; the mask is then stored in req->cqe.res.
aa43477b 6941 */
dbc2564c 6942static int io_poll_check_events(struct io_kiocb *req, bool *locked)
18bceab1 6943{
74ce6ce4 6944 struct io_ring_ctx *ctx = req->ctx;
dbc2564c 6945 int v, ret;
18bceab1 6946
316319e8 6947 /* req->task == current here, checking PF_EXITING is safe */
e09ee510 6948 if (unlikely(req->task->flags & PF_EXITING))
f2219057 6949 return -ECANCELED;
18bceab1 6950
aa43477b
PB
6951 do {
6952 v = atomic_read(&req->poll_refs);
74ce6ce4 6953
aa43477b
PB
6954 /* tw handler should be the owner, and so have some references */
6955 if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
6956 return 0;
6957 if (v & IO_POLL_CANCEL_FLAG)
6958 return -ECANCELED;
8706e04e 6959
cef216fc 6960 if (!req->cqe.res) {
2804ecd8 6961 struct poll_table_struct pt = { ._key = req->apoll_events };
cef216fc 6962 req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
c8b5e260 6963 }
74ce6ce4 6964
dbc2564c
HX
6965 if ((unlikely(!req->cqe.res)))
6966 continue;
6967 if (req->apoll_events & EPOLLONESHOT)
6968 return 0;
6969
6970 /* multishot, just fill a CQE and proceed */
6971 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
6972 __poll_t mask = mangle_poll(req->cqe.res &
6973 req->apoll_events);
aa43477b 6974 bool filled;
18bceab1 6975
aa43477b 6976 spin_lock(&ctx->completion_lock);
dbc2564c
HX
6977 filled = io_fill_cqe_aux(ctx, req->cqe.user_data,
6978 mask, IORING_CQE_F_MORE);
aa43477b
PB
6979 io_commit_cqring(ctx);
6980 spin_unlock(&ctx->completion_lock);
dbc2564c
HX
6981 if (filled) {
6982 io_cqring_ev_posted(ctx);
6983 continue;
6984 }
6985 return -ECANCELED;
aa43477b 6986 }
18bceab1 6987
dbc2564c
HX
6988 io_tw_lock(req->ctx, locked);
6989 if (unlikely(req->task->flags & PF_EXITING))
6990 return -EFAULT;
6991 ret = io_issue_sqe(req,
6992 IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
6993 if (ret)
6994 return ret;
6995
aa43477b
PB
6996 /*
6997 * Release all references, retry if someone tried to restart
6998 * task_work while we were executing it.
6999 */
7000 } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));
18bceab1 7001
18bceab1
JA
7002 return 1;
7003}
7004
aa43477b 7005static void io_poll_task_func(struct io_kiocb *req, bool *locked)
18bceab1 7006{
18bceab1 7007 struct io_ring_ctx *ctx = req->ctx;
aa43477b 7008 int ret;
18bceab1 7009
dbc2564c 7010 ret = io_poll_check_events(req, locked);
aa43477b
PB
7011 if (ret > 0)
7012 return;
7013
7014 if (!ret) {
cef216fc 7015 req->cqe.res = mangle_poll(req->cqe.res & req->poll.events);
e27414be 7016 } else {
cef216fc 7017 req->cqe.res = ret;
aa43477b 7018 req_set_fail(req);
a62682f9 7019 }
aa43477b
PB
7020
7021 io_poll_remove_entries(req);
7022 spin_lock(&ctx->completion_lock);
7023 hash_del(&req->hash_node);
cef216fc 7024 __io_req_complete_post(req, req->cqe.res, 0);
aa43477b
PB
7025 io_commit_cqring(ctx);
7026 spin_unlock(&ctx->completion_lock);
7027 io_cqring_ev_posted(ctx);
18bceab1
JA
7028}
7029
aa43477b 7030static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
18bceab1
JA
7031{
7032 struct io_ring_ctx *ctx = req->ctx;
aa43477b 7033 int ret;
18bceab1 7034
dbc2564c 7035 ret = io_poll_check_events(req, locked);
aa43477b
PB
7036 if (ret > 0)
7037 return;
18bceab1 7038
aa43477b
PB
7039 io_poll_remove_entries(req);
7040 spin_lock(&ctx->completion_lock);
7041 hash_del(&req->hash_node);
7042 spin_unlock(&ctx->completion_lock);
18bceab1 7043
aa43477b
PB
7044 if (!ret)
7045 io_req_task_submit(req, locked);
7046 else
7047 io_req_complete_failed(req, ret);
18bceab1
JA
7048}
7049
58f5c8d3 7050static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
aa43477b 7051{
cef216fc 7052 req->cqe.res = mask;
81459350
JA
7053 /*
 7054 * This is useful for a poll that is armed on behalf of another
7055 * request, and where the wakeup path could be on a different
7056 * CPU. We want to avoid pulling in req->apoll->events for that
7057 * case.
7058 */
2804ecd8 7059 req->apoll_events = events;
aa43477b
PB
7060 if (req->opcode == IORING_OP_POLL_ADD)
7061 req->io_task_work.func = io_poll_task_func;
7062 else
7063 req->io_task_work.func = io_apoll_task_func;
7064
cef216fc 7065 trace_io_uring_task_add(req->ctx, req, req->cqe.user_data, req->opcode, mask);
3fe07bcd 7066 io_req_task_work_add(req);
aa43477b
PB
7067}
7068
58f5c8d3
CH
7069static inline void io_poll_execute(struct io_kiocb *req, int res,
7070 __poll_t events)
aa43477b
PB
7071{
7072 if (io_poll_get_ownership(req))
81459350 7073 __io_poll_execute(req, res, events);
aa43477b
PB
7074}
7075
7076static void io_poll_cancel_req(struct io_kiocb *req)
7077{
7078 io_poll_mark_cancelled(req);
7079 /* kick tw, which should complete the request */
81459350 7080 io_poll_execute(req, 0, 0);
aa43477b
PB
7081}
7082
d89a4fac
JA
7083#define wqe_to_req(wait) ((void *)((unsigned long) (wait)->private & ~1))
7084#define wqe_is_double(wait) ((unsigned long) (wait)->private & 1)
a294bef5 7085#define IO_ASYNC_POLL_COMMON (EPOLLONESHOT | EPOLLPRI)
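/*
 * Note on wqe_to_req()/wqe_is_double() above: the wait entry's ->private
 * field holds the io_kiocb pointer with its lowest bit borrowed as a "this
 * is the double poll entry" tag ("wqe_private |= 1" in __io_queue_proc()
 * below). Request allocations are at least word aligned, so that bit is
 * otherwise always clear.
 */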
d89a4fac 7086
aa43477b
PB
7087static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
7088 void *key)
18bceab1 7089{
d89a4fac 7090 struct io_kiocb *req = wqe_to_req(wait);
aa43477b
PB
7091 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
7092 wait);
18bceab1
JA
7093 __poll_t mask = key_to_poll(key);
7094
791f3465
PB
7095 if (unlikely(mask & POLLFREE)) {
7096 io_poll_mark_cancelled(req);
7097 /* we have to kick tw in case it's not already */
81459350 7098 io_poll_execute(req, 0, poll->events);
791f3465
PB
7099
7100 /*
 7101 * If the waitqueue is being freed early but someone else already
 7102 * holds ownership over it, we have to tear down the request as
7103 * best we can. That means immediately removing the request from
7104 * its waitqueue and preventing all further accesses to the
7105 * waitqueue via the request.
7106 */
7107 list_del_init(&poll->wait.entry);
7108
7109 /*
 7110 * Careful: this *must* be the last step, since as soon
 7111 * as req->head is NULL'ed out, the request can be
 7112 * completed and freed, because the poll completion path
 7113 * no longer needs to take the waitqueue lock.
7114 */
7115 smp_store_release(&poll->head, NULL);
7116 return 1;
7117 }
7118
aa43477b 7119 /* for instances that support it check for an event match first */
1b1d7b4b 7120 if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
18bceab1
JA
7121 return 0;
7122
eb0089d6
PB
7123 if (io_poll_get_ownership(req)) {
7124 /* optional, saves extra locking for removal in tw handler */
7125 if (mask && poll->events & EPOLLONESHOT) {
7126 list_del_init(&poll->wait.entry);
7127 poll->head = NULL;
d89a4fac
JA
7128 if (wqe_is_double(wait))
7129 req->flags &= ~REQ_F_DOUBLE_POLL;
7130 else
7131 req->flags &= ~REQ_F_SINGLE_POLL;
eb0089d6 7132 }
81459350 7133 __io_poll_execute(req, mask, poll->events);
eb0089d6 7134 }
18bceab1 7135 return 1;
18bceab1
JA
7136}
7137
7138static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
807abcb0
JA
7139 struct wait_queue_head *head,
7140 struct io_poll_iocb **poll_ptr)
18bceab1
JA
7141{
7142 struct io_kiocb *req = pt->req;
d89a4fac 7143 unsigned long wqe_private = (unsigned long) req;
18bceab1
JA
7144
7145 /*
68b11e8b
PB
7146 * The file being polled uses multiple waitqueues for poll handling
 7147 * (e.g. one for read, one for write). Set up a separate io_poll_iocb
7148 * if this happens.
18bceab1 7149 */
68b11e8b 7150 if (unlikely(pt->nr_entries)) {
aa43477b 7151 struct io_poll_iocb *first = poll;
58852d4d 7152
23a65db8 7153 /* double add on the same waitqueue head, ignore */
aa43477b 7154 if (first->head == head)
23a65db8 7155 return;
18bceab1 7156 /* already have a 2nd entry, fail a third attempt */
807abcb0 7157 if (*poll_ptr) {
23a65db8
PB
7158 if ((*poll_ptr)->head == head)
7159 return;
18bceab1
JA
7160 pt->error = -EINVAL;
7161 return;
7162 }
aa43477b 7163
18bceab1
JA
7164 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
7165 if (!poll) {
7166 pt->error = -ENOMEM;
7167 return;
7168 }
d89a4fac
JA
7169 /* mark as double wq entry */
7170 wqe_private |= 1;
91eac1c6 7171 req->flags |= REQ_F_DOUBLE_POLL;
aa43477b 7172 io_init_poll_iocb(poll, first->events, first->wait.func);
807abcb0 7173 *poll_ptr = poll;
d886e185
PB
7174 if (req->opcode == IORING_OP_POLL_ADD)
7175 req->flags |= REQ_F_ASYNC_DATA;
18bceab1
JA
7176 }
7177
91eac1c6 7178 req->flags |= REQ_F_SINGLE_POLL;
68b11e8b 7179 pt->nr_entries++;
18bceab1 7180 poll->head = head;
d89a4fac 7181 poll->wait.private = (void *) wqe_private;
a31eb4a2
JX
7182
7183 if (poll->events & EPOLLEXCLUSIVE)
7184 add_wait_queue_exclusive(head, &poll->wait);
7185 else
7186 add_wait_queue(head, &poll->wait);
18bceab1
JA
7187}
7188
aa43477b 7189static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
18bceab1
JA
7190 struct poll_table_struct *p)
7191{
7192 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
d7718a9d 7193
aa43477b
PB
7194 __io_queue_proc(&pt->req->poll, pt, head,
7195 (struct io_poll_iocb **) &pt->req->async_data);
d7718a9d
JA
7196}
7197
aa43477b
PB
7198static int __io_arm_poll_handler(struct io_kiocb *req,
7199 struct io_poll_iocb *poll,
7200 struct io_poll_table *ipt, __poll_t mask)
d7718a9d
JA
7201{
7202 struct io_ring_ctx *ctx = req->ctx;
aa43477b 7203 int v;
d7718a9d 7204
4d52f338 7205 INIT_HLIST_NODE(&req->hash_node);
8e29da69 7206 req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
aa43477b 7207 io_init_poll_iocb(poll, mask, io_poll_wake);
b90cd197 7208 poll->file = req->file;
d7718a9d
JA
7209
7210 ipt->pt._key = mask;
7211 ipt->req = req;
68b11e8b
PB
7212 ipt->error = 0;
7213 ipt->nr_entries = 0;
d7718a9d 7214
aa43477b
PB
7215 /*
 7216 * Take ownership to delay any tw execution until we're done
 7217 * with poll arming; see io_poll_get_ownership().
7218 */
7219 atomic_set(&req->poll_refs, 1);
d7718a9d 7220 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
aa43477b
PB
7221
7222 if (mask && (poll->events & EPOLLONESHOT)) {
7223 io_poll_remove_entries(req);
7224 /* no one else has access to the req, forget about the ref */
7225 return mask;
7226 }
7227 if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
7228 io_poll_remove_entries(req);
7229 if (!ipt->error)
7230 ipt->error = -EINVAL;
7231 return 0;
7232 }
d7718a9d 7233
79ebeaee 7234 spin_lock(&ctx->completion_lock);
aa43477b
PB
7235 io_poll_req_insert(req);
7236 spin_unlock(&ctx->completion_lock);
7237
7238 if (mask) {
7239 /* can't multishot if failed, just queue the event we've got */
7240 if (unlikely(ipt->error || !ipt->nr_entries))
7241 poll->events |= EPOLLONESHOT;
81459350 7242 __io_poll_execute(req, mask, poll->events);
aa43477b 7243 return 0;
d7718a9d
JA
7244 }
7245
aa43477b
PB
7246 /*
7247 * Release ownership. If someone tried to queue a tw while it was
7248 * locked, kick it off for them.
7249 */
7250 v = atomic_dec_return(&req->poll_refs);
7251 if (unlikely(v & IO_POLL_REF_MASK))
81459350 7252 __io_poll_execute(req, 0, poll->events);
aa43477b
PB
7253 return 0;
7254}
7255
7256static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
7257 struct poll_table_struct *p)
7258{
7259 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
7260 struct async_poll *apoll = pt->req->apoll;
7261
7262 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
d7718a9d
JA
7263}
7264
59b735ae
OL
7265enum {
7266 IO_APOLL_OK,
7267 IO_APOLL_ABORTED,
7268 IO_APOLL_READY
7269};
7270
4d9237e3 7271static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
d7718a9d
JA
7272{
7273 const struct io_op_def *def = &io_op_defs[req->opcode];
7274 struct io_ring_ctx *ctx = req->ctx;
7275 struct async_poll *apoll;
7276 struct io_poll_table ipt;
dbc2564c 7277 __poll_t mask = POLLPRI | POLLERR;
aa43477b 7278 int ret;
d7718a9d 7279
b2d9c3da
PB
7280 if (!def->pollin && !def->pollout)
7281 return IO_APOLL_ABORTED;
10c87333 7282 if (!file_can_poll(req->file))
658d0a40 7283 return IO_APOLL_ABORTED;
10c87333 7284 if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
658d0a40 7285 return IO_APOLL_ABORTED;
dbc2564c
HX
7286 if (!(req->flags & REQ_F_APOLL_MULTISHOT))
7287 mask |= EPOLLONESHOT;
b2d9c3da
PB
7288
7289 if (def->pollin) {
a294bef5 7290 mask |= EPOLLIN | EPOLLRDNORM;
b2d9c3da
PB
7291
7292 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
7293 if ((req->opcode == IORING_OP_RECVMSG) &&
7294 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
a294bef5 7295 mask &= ~EPOLLIN;
b2d9c3da 7296 } else {
a294bef5 7297 mask |= EPOLLOUT | EPOLLWRNORM;
b2d9c3da 7298 }
52dd8640
DY
7299 if (def->poll_exclusive)
7300 mask |= EPOLLEXCLUSIVE;
10c87333
JA
7301 if (req->flags & REQ_F_POLLED) {
7302 apoll = req->apoll;
7303 } else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
7304 !list_empty(&ctx->apoll_cache)) {
4d9237e3
JA
7305 apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
7306 poll.wait.entry);
7307 list_del_init(&apoll->poll.wait.entry);
7308 } else {
7309 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
7310 if (unlikely(!apoll))
7311 return IO_APOLL_ABORTED;
7312 }
807abcb0 7313 apoll->double_poll = NULL;
d7718a9d 7314 req->apoll = apoll;
b2d9c3da 7315 req->flags |= REQ_F_POLLED;
d7718a9d
JA
7316 ipt.pt._qproc = io_async_queue_proc;
7317
4d55f238 7318 io_kbuf_recycle(req, issue_flags);
abdad709 7319
aa43477b 7320 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
41a5169c
HX
7321 if (ret || ipt.error)
7322 return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
7323
cef216fc 7324 trace_io_uring_poll_arm(ctx, req, req->cqe.user_data, req->opcode,
236daeae 7325 mask, apoll->poll.events);
59b735ae 7326 return IO_APOLL_OK;
d7718a9d
JA
7327}
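/*
 * How the IO_APOLL_* return values are consumed (a sketch based on callers
 * in this file): IO_APOLL_OK means async poll was armed and the request will
 * be re-issued from the wakeup via io_apoll_task_func(); the other two
 * values mean poll arming did not take over, e.g. io_wq_submit_work() below
 * simply retries the request in blocking mode for both ABORTED and READY.
 */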
7328
76e1b642
JA
7329/*
7330 * Returns true if we found and killed one or more poll requests
7331 */
c072481d
PB
7332static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
7333 struct task_struct *tsk, bool cancel_all)
221c5eb2 7334{
78076bb6 7335 struct hlist_node *tmp;
221c5eb2 7336 struct io_kiocb *req;
aa43477b
PB
7337 bool found = false;
7338 int i;
221c5eb2 7339
79ebeaee 7340 spin_lock(&ctx->completion_lock);
78076bb6
JA
7341 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
7342 struct hlist_head *list;
7343
7344 list = &ctx->cancel_hash[i];
f3606e3a 7345 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
42a7b4ed 7346 if (io_match_task_safe(req, tsk, cancel_all)) {
61bc84c4 7347 hlist_del_init(&req->hash_node);
aa43477b
PB
7348 io_poll_cancel_req(req);
7349 found = true;
7350 }
f3606e3a 7351 }
221c5eb2 7352 }
79ebeaee 7353 spin_unlock(&ctx->completion_lock);
aa43477b 7354 return found;
221c5eb2
JA
7355}
7356
b21432b4
JA
7357static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
7358 struct io_cancel_data *cd)
e07785b0 7359 __must_hold(&ctx->completion_lock)
47f46768 7360{
78076bb6 7361 struct hlist_head *list;
47f46768
JA
7362 struct io_kiocb *req;
7363
b21432b4 7364 list = &ctx->cancel_hash[hash_long(cd->data, ctx->cancel_hash_bits)];
78076bb6 7365 hlist_for_each_entry(req, list, hash_node) {
b21432b4 7366 if (cd->data != req->cqe.user_data)
b41e9852 7367 continue;
9ba5fac8
PB
7368 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
7369 continue;
8e29da69
JA
7370 if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
7371 if (cd->seq == req->work.cancel_seq)
7372 continue;
7373 req->work.cancel_seq = cd->seq;
7374 }
b2cb805f 7375 return req;
47f46768 7376 }
b2cb805f
JA
7377 return NULL;
7378}
7379
4bf94615
JA
7380static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
7381 struct io_cancel_data *cd)
7382 __must_hold(&ctx->completion_lock)
7383{
7384 struct io_kiocb *req;
7385 int i;
7386
7387 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
7388 struct hlist_head *list;
7389
7390 list = &ctx->cancel_hash[i];
7391 hlist_for_each_entry(req, list, hash_node) {
970f256e
JA
7392 if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
7393 req->file != cd->file)
4bf94615
JA
7394 continue;
7395 if (cd->seq == req->work.cancel_seq)
7396 continue;
7397 req->work.cancel_seq = cd->seq;
7398 return req;
7399 }
7400 }
7401 return NULL;
7402}
7403
aa43477b
PB
7404static bool io_poll_disarm(struct io_kiocb *req)
7405 __must_hold(&ctx->completion_lock)
7406{
7407 if (!io_poll_get_ownership(req))
7408 return false;
7409 io_poll_remove_entries(req);
7410 hash_del(&req->hash_node);
7411 return true;
7412}
7413
b21432b4 7414static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
e07785b0 7415 __must_hold(&ctx->completion_lock)
b2cb805f 7416{
4bf94615 7417 struct io_kiocb *req;
b2cb805f 7418
970f256e 7419 if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
4bf94615
JA
7420 req = io_poll_file_find(ctx, cd);
7421 else
7422 req = io_poll_find(ctx, false, cd);
b2cb805f
JA
7423 if (!req)
7424 return -ENOENT;
aa43477b
PB
7425 io_poll_cancel_req(req);
7426 return 0;
47f46768
JA
7427}
7428
9096af3e
PB
7429static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
7430 unsigned int flags)
7431{
7432 u32 events;
47f46768 7433
9096af3e
PB
7434 events = READ_ONCE(sqe->poll32_events);
7435#ifdef __BIG_ENDIAN
7436 events = swahw32(events);
7437#endif
7438 if (!(flags & IORING_POLL_ADD_MULTI))
7439 events |= EPOLLONESHOT;
7440 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
47f46768
JA
7441}
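/*
 * A worked example (sketch of the common case): an SQE carrying
 * poll32_events = POLLIN with IORING_POLL_ADD_MULTI clear comes out of here
 * as EPOLLIN | EPOLLONESHOT, i.e. a one-shot poll; setting
 * IORING_POLL_ADD_MULTI leaves EPOLLONESHOT clear, so the request stays
 * armed and keeps posting CQEs (see the multishot handling in
 * io_poll_check_events()).
 */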
7442
54739cc6 7443static int io_poll_remove_prep(struct io_kiocb *req,
3529d8c2 7444 const struct io_uring_sqe *sqe)
0969e783 7445{
c5de0036
PB
7446 struct io_poll_update *upd = &req->poll_update;
7447 u32 flags;
7448
73911426 7449 if (sqe->buf_index || sqe->splice_fd_in)
c5de0036
PB
7450 return -EINVAL;
7451 flags = READ_ONCE(sqe->len);
7452 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
7453 IORING_POLL_ADD_MULTI))
7454 return -EINVAL;
7455 /* meaningless without update */
7456 if (flags == IORING_POLL_ADD_MULTI)
0969e783
JA
7457 return -EINVAL;
7458
c5de0036
PB
7459 upd->old_user_data = READ_ONCE(sqe->addr);
7460 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
7461 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
221c5eb2 7462
c5de0036
PB
7463 upd->new_user_data = READ_ONCE(sqe->off);
7464 if (!upd->update_user_data && upd->new_user_data)
7465 return -EINVAL;
7466 if (upd->update_events)
7467 upd->events = io_poll_parse_events(sqe, flags);
7468 else if (sqe->poll32_events)
7469 return -EINVAL;
221c5eb2 7470
221c5eb2
JA
7471 return 0;
7472}
7473
3529d8c2 7474static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
7475{
7476 struct io_poll_iocb *poll = &req->poll;
c5de0036 7477 u32 flags;
221c5eb2 7478
73911426 7479 if (sqe->buf_index || sqe->off || sqe->addr)
88e41cf9
JA
7480 return -EINVAL;
7481 flags = READ_ONCE(sqe->len);
c5de0036 7482 if (flags & ~IORING_POLL_ADD_MULTI)
221c5eb2 7483 return -EINVAL;
04c76b41
PB
7484 if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
7485 return -EINVAL;
221c5eb2 7486
48dcd38d 7487 io_req_set_refcount(req);
2804ecd8 7488 req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
0969e783
JA
7489 return 0;
7490}
7491
61e98203 7492static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
0969e783
JA
7493{
7494 struct io_poll_iocb *poll = &req->poll;
0969e783 7495 struct io_poll_table ipt;
aa43477b 7496 int ret;
0969e783 7497
d7718a9d 7498 ipt.pt._qproc = io_poll_queue_proc;
36703247 7499
aa43477b
PB
7500 ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
7501 ret = ret ?: ipt.error;
7502 if (ret)
7503 __io_req_complete(req, issue_flags, ret, 0);
7504 return 0;
221c5eb2
JA
7505}
7506
54739cc6 7507static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
b69de288 7508{
b21432b4 7509 struct io_cancel_data cd = { .data = req->poll_update.old_user_data, };
b69de288
JA
7510 struct io_ring_ctx *ctx = req->ctx;
7511 struct io_kiocb *preq;
2bbb146d 7512 int ret2, ret = 0;
cc8e9ba7 7513 bool locked;
b69de288 7514
79ebeaee 7515 spin_lock(&ctx->completion_lock);
b21432b4 7516 preq = io_poll_find(ctx, true, &cd);
aa43477b 7517 if (!preq || !io_poll_disarm(preq)) {
79ebeaee 7518 spin_unlock(&ctx->completion_lock);
aa43477b 7519 ret = preq ? -EALREADY : -ENOENT;
2bbb146d 7520 goto out;
b69de288 7521 }
79ebeaee 7522 spin_unlock(&ctx->completion_lock);
cb3b200e 7523
2bbb146d
PB
7524 if (req->poll_update.update_events || req->poll_update.update_user_data) {
 7525 /* only mask in the event flags, keep the behavior flags */
7526 if (req->poll_update.update_events) {
7527 preq->poll.events &= ~0xffff;
7528 preq->poll.events |= req->poll_update.events & 0xffff;
7529 preq->poll.events |= IO_POLL_UNMASK;
cb3b200e 7530 }
2bbb146d 7531 if (req->poll_update.update_user_data)
cef216fc 7532 preq->cqe.user_data = req->poll_update.new_user_data;
b69de288 7533
2bbb146d
PB
7534 ret2 = io_poll_add(preq, issue_flags);
7535 /* successfully updated, don't complete poll request */
7536 if (!ret2)
7537 goto out;
b69de288 7538 }
6224590d 7539
2bbb146d 7540 req_set_fail(preq);
cef216fc 7541 preq->cqe.res = -ECANCELED;
cc8e9ba7
PB
7542 locked = !(issue_flags & IO_URING_F_UNLOCKED);
7543 io_req_task_complete(preq, &locked);
2bbb146d
PB
7544out:
7545 if (ret < 0)
6224590d 7546 req_set_fail(req);
2bbb146d 7547 /* complete update request, we're done with it */
cc8e9ba7 7548 __io_req_complete(req, issue_flags, ret, 0);
b69de288 7549 return 0;
89850fce
JA
7550}
7551
5262f567
JA
7552static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
7553{
ad8a48ac
JA
7554 struct io_timeout_data *data = container_of(timer,
7555 struct io_timeout_data, timer);
7556 struct io_kiocb *req = data->req;
7557 struct io_ring_ctx *ctx = req->ctx;
5262f567
JA
7558 unsigned long flags;
7559
89850fce 7560 spin_lock_irqsave(&ctx->timeout_lock, flags);
a71976f3 7561 list_del_init(&req->timeout.list);
01cec8c1
PB
7562 atomic_set(&req->ctx->cq_timeouts,
7563 atomic_read(&req->ctx->cq_timeouts) + 1);
89850fce 7564 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
01cec8c1 7565
a90c8bf6
PB
7566 if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
7567 req_set_fail(req);
7568
cef216fc 7569 req->cqe.res = -ETIME;
a90c8bf6 7570 req->io_task_work.func = io_req_task_complete;
3fe07bcd 7571 io_req_task_work_add(req);
5262f567
JA
7572 return HRTIMER_NORESTART;
7573}
7574
fbd15848 7575static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
b21432b4 7576 struct io_cancel_data *cd)
89850fce 7577 __must_hold(&ctx->timeout_lock)
f254ac04 7578{
fbd15848 7579 struct io_timeout_data *io;
47f46768 7580 struct io_kiocb *req;
fd9c7bc5 7581 bool found = false;
f254ac04 7582
135fcde8 7583 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
970f256e
JA
7584 if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
7585 cd->data != req->cqe.user_data)
8e29da69 7586 continue;
970f256e 7587 if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
8e29da69
JA
7588 if (cd->seq == req->work.cancel_seq)
7589 continue;
7590 req->work.cancel_seq = cd->seq;
7591 }
7592 found = true;
7593 break;
47f46768 7594 }
fd9c7bc5
PB
7595 if (!found)
7596 return ERR_PTR(-ENOENT);
fbd15848
PB
7597
7598 io = req->async_data;
fd9c7bc5 7599 if (hrtimer_try_to_cancel(&io->timer) == -1)
fbd15848 7600 return ERR_PTR(-EALREADY);
a71976f3 7601 list_del_init(&req->timeout.list);
fbd15848
PB
7602 return req;
7603}
47f46768 7604
b21432b4 7605static int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
ec3c3d0f 7606 __must_hold(&ctx->completion_lock)
fbd15848 7607{
3645c200
PB
7608 struct io_kiocb *req;
7609
7610 spin_lock_irq(&ctx->timeout_lock);
b21432b4 7611 req = io_timeout_extract(ctx, cd);
3645c200 7612 spin_unlock_irq(&ctx->timeout_lock);
fbd15848
PB
7613
7614 if (IS_ERR(req))
7615 return PTR_ERR(req);
6695490d 7616 io_req_task_queue_fail(req, -ECANCELED);
f254ac04
JA
7617 return 0;
7618}
7619
50c1df2b
JA
7620static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
7621{
7622 switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
7623 case IORING_TIMEOUT_BOOTTIME:
7624 return CLOCK_BOOTTIME;
7625 case IORING_TIMEOUT_REALTIME:
7626 return CLOCK_REALTIME;
7627 default:
7628 /* can't happen, vetted at prep time */
7629 WARN_ON_ONCE(1);
7630 fallthrough;
7631 case 0:
7632 return CLOCK_MONOTONIC;
7633 }
7634}
7635
f1042b6c
PB
7636static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
7637 struct timespec64 *ts, enum hrtimer_mode mode)
7638 __must_hold(&ctx->timeout_lock)
7639{
7640 struct io_timeout_data *io;
7641 struct io_kiocb *req;
7642 bool found = false;
7643
7644 list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) {
cef216fc 7645 found = user_data == req->cqe.user_data;
f1042b6c
PB
7646 if (found)
7647 break;
7648 }
7649 if (!found)
7650 return -ENOENT;
7651
7652 io = req->async_data;
7653 if (hrtimer_try_to_cancel(&io->timer) == -1)
7654 return -EALREADY;
7655 hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
7656 io->timer.function = io_link_timeout_fn;
7657 hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
7658 return 0;
7659}
7660
9c8e11b3
PB
7661static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
7662 struct timespec64 *ts, enum hrtimer_mode mode)
89850fce 7663 __must_hold(&ctx->timeout_lock)
47f46768 7664{
b21432b4
JA
7665 struct io_cancel_data cd = { .data = user_data, };
7666 struct io_kiocb *req = io_timeout_extract(ctx, &cd);
9c8e11b3 7667 struct io_timeout_data *data;
47f46768 7668
9c8e11b3
PB
7669 if (IS_ERR(req))
7670 return PTR_ERR(req);
47f46768 7671
9c8e11b3
PB
7672 req->timeout.off = 0; /* noseq */
7673 data = req->async_data;
7674 list_add_tail(&req->timeout.list, &ctx->timeout_list);
50c1df2b 7675 hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
9c8e11b3
PB
7676 data->timer.function = io_timeout_fn;
7677 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
7678 return 0;
47f46768
JA
7679}
7680
3529d8c2
JA
7681static int io_timeout_remove_prep(struct io_kiocb *req,
7682 const struct io_uring_sqe *sqe)
b29472ee 7683{
9c8e11b3
PB
7684 struct io_timeout_rem *tr = &req->timeout_rem;
7685
61710e43
DA
7686 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
7687 return -EINVAL;
73911426 7688 if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
b29472ee
JA
7689 return -EINVAL;
7690
f1042b6c 7691 tr->ltimeout = false;
9c8e11b3
PB
7692 tr->addr = READ_ONCE(sqe->addr);
7693 tr->flags = READ_ONCE(sqe->timeout_flags);
f1042b6c
PB
7694 if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
7695 if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
7696 return -EINVAL;
7697 if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
7698 tr->ltimeout = true;
7699 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
9c8e11b3
PB
7700 return -EINVAL;
7701 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
7702 return -EFAULT;
2087009c
YB
7703 if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
7704 return -EINVAL;
9c8e11b3
PB
7705 } else if (tr->flags) {
7706 /* timeout removal doesn't support flags */
b29472ee 7707 return -EINVAL;
9c8e11b3 7708 }
b29472ee 7709
b29472ee
JA
7710 return 0;
7711}
7712
8662daec
PB
7713static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
7714{
7715 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
7716 : HRTIMER_MODE_REL;
7717}
7718
11365043
JA
7719/*
7720 * Remove or update an existing timeout command
7721 */
61e98203 7722static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
11365043 7723{
9c8e11b3 7724 struct io_timeout_rem *tr = &req->timeout_rem;
11365043 7725 struct io_ring_ctx *ctx = req->ctx;
47f46768 7726 int ret;
11365043 7727
ec3c3d0f 7728 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
b21432b4
JA
7729 struct io_cancel_data cd = { .data = tr->addr, };
7730
ec3c3d0f 7731 spin_lock(&ctx->completion_lock);
b21432b4 7732 ret = io_timeout_cancel(ctx, &cd);
ec3c3d0f
PB
7733 spin_unlock(&ctx->completion_lock);
7734 } else {
f1042b6c
PB
7735 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
7736
ec3c3d0f 7737 spin_lock_irq(&ctx->timeout_lock);
f1042b6c
PB
7738 if (tr->ltimeout)
7739 ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
7740 else
7741 ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
ec3c3d0f
PB
7742 spin_unlock_irq(&ctx->timeout_lock);
7743 }
11365043 7744
4e88d6e7 7745 if (ret < 0)
93d2bcd2 7746 req_set_fail(req);
505657bc 7747 io_req_complete_post(req, ret, 0);
11365043 7748 return 0;
5262f567
JA
7749}
7750
ecddc25d
JA
7751static int __io_timeout_prep(struct io_kiocb *req,
7752 const struct io_uring_sqe *sqe,
7753 bool is_timeout_link)
5262f567 7754{
ad8a48ac 7755 struct io_timeout_data *data;
a41525ab 7756 unsigned flags;
56080b02 7757 u32 off = READ_ONCE(sqe->off);
5262f567 7758
73911426 7759 if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
a41525ab 7760 return -EINVAL;
56080b02 7761 if (off && is_timeout_link)
2d28390a 7762 return -EINVAL;
a41525ab 7763 flags = READ_ONCE(sqe->timeout_flags);
6224590d
PB
7764 if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
7765 IORING_TIMEOUT_ETIME_SUCCESS))
50c1df2b
JA
7766 return -EINVAL;
7767 /* more than one clock specified is invalid, obviously */
7768 if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
5262f567 7769 return -EINVAL;
bdf20073 7770
ef9dd637 7771 INIT_LIST_HEAD(&req->timeout.list);
bfe68a22 7772 req->timeout.off = off;
f18ee4cf
PB
7773 if (unlikely(off && !req->ctx->off_timeout_used))
7774 req->ctx->off_timeout_used = true;
26a61679 7775
d6a644a7
PB
7776 if (WARN_ON_ONCE(req_has_async_data(req)))
7777 return -EFAULT;
7778 if (io_alloc_async_data(req))
26a61679
JA
7779 return -ENOMEM;
7780
e8c2bc1f 7781 data = req->async_data;
ad8a48ac 7782 data->req = req;
50c1df2b 7783 data->flags = flags;
ad8a48ac
JA
7784
7785 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
7786 return -EFAULT;
7787
f6223ff7
YB
7788 if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
7789 return -EINVAL;
7790
e677edbc 7791 INIT_LIST_HEAD(&req->timeout.list);
8662daec 7792 data->mode = io_translate_timeout_mode(flags);
50c1df2b 7793 hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
b97e736a
PB
7794
7795 if (is_timeout_link) {
7796 struct io_submit_link *link = &req->ctx->submit_state.link;
7797
7798 if (!link->head)
7799 return -EINVAL;
7800 if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
7801 return -EINVAL;
4d13d1a4
PB
7802 req->timeout.head = link->last;
7803 link->last->flags |= REQ_F_ARM_LTIMEOUT;
b97e736a 7804 }
ad8a48ac
JA
7805 return 0;
7806}
7807
ecddc25d
JA
7808static int io_timeout_prep(struct io_kiocb *req,
7809 const struct io_uring_sqe *sqe)
7810{
7811 return __io_timeout_prep(req, sqe, false);
7812}
7813
7814static int io_link_timeout_prep(struct io_kiocb *req,
7815 const struct io_uring_sqe *sqe)
7816{
7817 return __io_timeout_prep(req, sqe, true);
7818}
7819
61e98203 7820static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
ad8a48ac 7821{
ad8a48ac 7822 struct io_ring_ctx *ctx = req->ctx;
e8c2bc1f 7823 struct io_timeout_data *data = req->async_data;
ad8a48ac 7824 struct list_head *entry;
bfe68a22 7825 u32 tail, off = req->timeout.off;
ad8a48ac 7826
89850fce 7827 spin_lock_irq(&ctx->timeout_lock);
93bd25bb 7828
5262f567
JA
7829 /*
 7830 * sqe->off holds how many events need to occur for this
93bd25bb
JA
 7831 * timeout event to be satisfied. If it isn't set, then this is
 7832 * a pure timeout request and the sequence isn't used.
5262f567 7833 */
8eb7e2d0 7834 if (io_is_timeout_noseq(req)) {
93bd25bb
JA
7835 entry = ctx->timeout_list.prev;
7836 goto add;
7837 }
5262f567 7838
bfe68a22
PB
7839 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
7840 req->timeout.target_seq = tail + off;
5262f567 7841
f010505b
MDG
7842 /* Update the last seq here in case io_flush_timeouts() hasn't.
 7843 * This is safe because ->timeout_lock is held, and submissions
 7844 * and completions are never mixed in the same ->timeout_lock section.
7845 */
7846 ctx->cq_last_tm_flush = tail;
7847
5262f567
JA
7848 /*
7849 * Insertion sort, ensuring the first entry in the list is always
7850 * the one we need first.
7851 */
5262f567 7852 list_for_each_prev(entry, &ctx->timeout_list) {
135fcde8
PB
7853 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
7854 timeout.list);
5262f567 7855
8eb7e2d0 7856 if (io_is_timeout_noseq(nxt))
93bd25bb 7857 continue;
bfe68a22
PB
7858 /* nxt.seq is behind @tail, otherwise would've been completed */
7859 if (off >= nxt->timeout.target_seq - tail)
5262f567
JA
7860 break;
7861 }
93bd25bb 7862add:
135fcde8 7863 list_add(&req->timeout.list, entry);
ad8a48ac
JA
7864 data->timer.function = io_timeout_fn;
7865 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
89850fce 7866 spin_unlock_irq(&ctx->timeout_lock);
5262f567
JA
7867 return 0;
7868}
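/*
 * Sequence bookkeeping in the function above, as a small worked example:
 * with cached_cq_tail == 100, cq_timeouts == 4 and sqe->off == 2, tail is 96
 * and target_seq becomes 98, i.e. the timeout fires after two more
 * non-timeout completions unless the timer expires first.
 */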
5262f567 7869
62755e35
JA
7870static bool io_cancel_cb(struct io_wq_work *work, void *data)
7871{
7872 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
f458dd84 7873 struct io_cancel_data *cd = data;
62755e35 7874
8e29da69
JA
7875 if (req->ctx != cd->ctx)
7876 return false;
970f256e
JA
7877 if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
7878 ;
7879 } else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
4bf94615
JA
7880 if (req->file != cd->file)
7881 return false;
7882 } else {
7883 if (req->cqe.user_data != cd->data)
7884 return false;
7885 }
970f256e 7886 if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
8e29da69
JA
7887 if (cd->seq == req->work.cancel_seq)
7888 return false;
7889 req->work.cancel_seq = cd->seq;
7890 }
7891 return true;
62755e35
JA
7892}
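/*
 * Matching rules implemented above, summarised: the work item must belong to
 * the same ring; IORING_ASYNC_CANCEL_ANY matches anything on that ring,
 * IORING_ASYNC_CANCEL_FD matches on the request's file, otherwise matching
 * is by cqe.user_data. For ALL/ANY style cancelation each request is only
 * matched once per attempt, tracked through ->work.cancel_seq.
 */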
7893
b21432b4
JA
7894static int io_async_cancel_one(struct io_uring_task *tctx,
7895 struct io_cancel_data *cd)
62755e35 7896{
62755e35 7897 enum io_wq_cancel cancel_ret;
62755e35 7898 int ret = 0;
970f256e 7899 bool all;
62755e35 7900
f458dd84 7901 if (!tctx || !tctx->io_wq)
5aa75ed5
JA
7902 return -ENOENT;
7903
970f256e
JA
7904 all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
7905 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
62755e35
JA
7906 switch (cancel_ret) {
7907 case IO_WQ_CANCEL_OK:
7908 ret = 0;
7909 break;
7910 case IO_WQ_CANCEL_RUNNING:
7911 ret = -EALREADY;
7912 break;
7913 case IO_WQ_CANCEL_NOTFOUND:
7914 ret = -ENOENT;
7915 break;
7916 }
7917
e977d6d3
JA
7918 return ret;
7919}
7920
b21432b4 7921static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
47f46768 7922{
8cb01fac 7923 struct io_ring_ctx *ctx = req->ctx;
47f46768
JA
7924 int ret;
7925
dadebc35 7926 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
8cb01fac 7927
b21432b4 7928 ret = io_async_cancel_one(req->task->io_uring, cd);
ccbf7261
JA
7929 /*
 7930 * Fall through even for -EALREADY, as we may have a poll armed
 7931 * that needs unarming.
7932 */
7933 if (!ret)
7934 return 0;
505657bc
PB
7935
7936 spin_lock(&ctx->completion_lock);
b21432b4 7937 ret = io_poll_cancel(ctx, cd);
ccbf7261
JA
7938 if (ret != -ENOENT)
7939 goto out;
4bf94615
JA
7940 if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
7941 ret = io_timeout_cancel(ctx, cd);
505657bc
PB
7942out:
7943 spin_unlock(&ctx->completion_lock);
7944 return ret;
47f46768
JA
7945}
7946
970f256e
JA
7947#define CANCEL_FLAGS (IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
7948 IORING_ASYNC_CANCEL_ANY)
7949
3529d8c2
JA
7950static int io_async_cancel_prep(struct io_kiocb *req,
7951 const struct io_uring_sqe *sqe)
e977d6d3 7952{
4bf94615 7953 if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
61710e43 7954 return -EINVAL;
73911426 7955 if (sqe->off || sqe->len || sqe->splice_fd_in)
e977d6d3
JA
7956 return -EINVAL;
7957
fbf23849 7958 req->cancel.addr = READ_ONCE(sqe->addr);
8e29da69 7959 req->cancel.flags = READ_ONCE(sqe->cancel_flags);
970f256e 7960 if (req->cancel.flags & ~CANCEL_FLAGS)
8e29da69 7961 return -EINVAL;
970f256e
JA
7962 if (req->cancel.flags & IORING_ASYNC_CANCEL_FD) {
7963 if (req->cancel.flags & IORING_ASYNC_CANCEL_ANY)
7964 return -EINVAL;
4bf94615 7965 req->cancel.fd = READ_ONCE(sqe->fd);
970f256e 7966 }
8e29da69 7967
fbf23849
JA
7968 return 0;
7969}
7970
8e29da69
JA
7971static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req,
7972 unsigned int issue_flags)
fbf23849 7973{
970f256e 7974 bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
8e29da69 7975 struct io_ring_ctx *ctx = cd->ctx;
58f99373 7976 struct io_tctx_node *node;
8e29da69 7977 int ret, nr = 0;
58f99373 7978
8e29da69
JA
7979 do {
7980 ret = io_try_cancel(req, cd);
7981 if (ret == -ENOENT)
7982 break;
970f256e 7983 if (!all)
8e29da69
JA
7984 return ret;
7985 nr++;
7986 } while (1);
58f99373
PB
7987
7988 /* slow path, try all io-wq's */
f8929630 7989 io_ring_submit_lock(ctx, issue_flags);
58f99373
PB
7990 ret = -ENOENT;
7991 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
7992 struct io_uring_task *tctx = node->task->io_uring;
fbf23849 7993
8e29da69
JA
7994 ret = io_async_cancel_one(tctx, cd);
7995 if (ret != -ENOENT) {
970f256e 7996 if (!all)
8e29da69
JA
7997 break;
7998 nr++;
7999 }
58f99373 8000 }
f8929630 8001 io_ring_submit_unlock(ctx, issue_flags);
970f256e 8002 return all ? nr : ret;
8e29da69
JA
8003}
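/*
 * Rough shape of the cancel flow above (a sketch): first try the issuing
 * task's own context via io_try_cancel() (io-wq, then poll, then timeouts),
 * and only if nothing matches fall back to walking every io_uring_task
 * attached to the ring under the uring lock. With ALL/ANY the loop keeps
 * going and the number of canceled requests is returned instead of an error.
 */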
8004
8005static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
8006{
8007 struct io_cancel_data cd = {
8008 .ctx = req->ctx,
8009 .data = req->cancel.addr,
8010 .flags = req->cancel.flags,
8011 .seq = atomic_inc_return(&req->ctx->cancel_seq),
8012 };
8013 int ret;
8014
4bf94615
JA
8015 if (cd.flags & IORING_ASYNC_CANCEL_FD) {
8016 if (req->flags & REQ_F_FIXED_FILE)
8017 req->file = io_file_get_fixed(req, req->cancel.fd,
8018 issue_flags);
8019 else
8020 req->file = io_file_get_normal(req, req->cancel.fd);
8021 if (!req->file) {
8022 ret = -EBADF;
8023 goto done;
8024 }
8025 cd.file = req->file;
58f99373 8026 }
4bf94615 8027
8e29da69 8028 ret = __io_async_cancel(&cd, req, issue_flags);
58f99373 8029done:
58f99373 8030 if (ret < 0)
93d2bcd2 8031 req_set_fail(req);
505657bc 8032 io_req_complete_post(req, ret, 0);
5262f567
JA
8033 return 0;
8034}
8035
54739cc6 8036static int io_files_update_prep(struct io_kiocb *req,
05f3fb3c
JA
8037 const struct io_uring_sqe *sqe)
8038{
61710e43
DA
8039 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
8040 return -EINVAL;
73911426 8041 if (sqe->rw_flags || sqe->splice_fd_in)
05f3fb3c
JA
8042 return -EINVAL;
8043
269bbe5f
BM
8044 req->rsrc_update.offset = READ_ONCE(sqe->off);
8045 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
8046 if (!req->rsrc_update.nr_args)
05f3fb3c 8047 return -EINVAL;
269bbe5f 8048 req->rsrc_update.arg = READ_ONCE(sqe->addr);
05f3fb3c
JA
8049 return 0;
8050}
8051
a7c41b46
XW
8052static int io_files_update_with_index_alloc(struct io_kiocb *req,
8053 unsigned int issue_flags)
8054{
8055 __s32 __user *fds = u64_to_user_ptr(req->rsrc_update.arg);
8056 unsigned int done;
8057 struct file *file;
8058 int ret, fd;
8059
8060 for (done = 0; done < req->rsrc_update.nr_args; done++) {
8061 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
8062 ret = -EFAULT;
8063 break;
8064 }
8065
8066 file = fget(fd);
8067 if (!file) {
8068 ret = -EBADF;
8069 break;
8070 }
8071 ret = io_fixed_fd_install(req, issue_flags, file,
8072 IORING_FILE_INDEX_ALLOC);
8073 if (ret < 0)
8074 break;
8075 if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
a7c41b46 8076 __io_close_fixed(req, issue_flags, ret);
e71d7c56 8077 ret = -EFAULT;
a7c41b46
XW
8078 break;
8079 }
8080 }
8081
8082 if (done)
8083 return done;
8084 return ret;
8085}
8086
889fca73 8087static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
fbf23849
JA
8088{
8089 struct io_ring_ctx *ctx = req->ctx;
c3bdad02 8090 struct io_uring_rsrc_update2 up;
05f3fb3c 8091 int ret;
fbf23849 8092
269bbe5f
BM
8093 up.offset = req->rsrc_update.offset;
8094 up.data = req->rsrc_update.arg;
c3bdad02
PB
8095 up.nr = 0;
8096 up.tags = 0;
615cee49 8097 up.resv = 0;
d8a3ba9c 8098 up.resv2 = 0;
05f3fb3c 8099
a7c41b46
XW
8100 if (req->rsrc_update.offset == IORING_FILE_INDEX_ALLOC) {
8101 ret = io_files_update_with_index_alloc(req, issue_flags);
8102 } else {
8103 io_ring_submit_lock(ctx, issue_flags);
8104 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
8105 &up, req->rsrc_update.nr_args);
8106 io_ring_submit_unlock(ctx, issue_flags);
8107 }
05f3fb3c
JA
8108
8109 if (ret < 0)
93d2bcd2 8110 req_set_fail(req);
889fca73 8111 __io_req_complete(req, issue_flags, ret, 0);
5262f567
JA
8112 return 0;
8113}
8114
bfe76559 8115static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1 8116{
d625c6ee 8117 switch (req->opcode) {
e781573e 8118 case IORING_OP_NOP:
2bb04df7 8119 return io_nop_prep(req, sqe);
f67676d1
JA
8120 case IORING_OP_READV:
8121 case IORING_OP_READ_FIXED:
3a6820f2 8122 case IORING_OP_READ:
f67676d1
JA
8123 case IORING_OP_WRITEV:
8124 case IORING_OP_WRITE_FIXED:
3a6820f2 8125 case IORING_OP_WRITE:
584b0180 8126 return io_prep_rw(req, sqe);
0969e783 8127 case IORING_OP_POLL_ADD:
bfe76559 8128 return io_poll_add_prep(req, sqe);
0969e783 8129 case IORING_OP_POLL_REMOVE:
54739cc6 8130 return io_poll_remove_prep(req, sqe);
8ed8d3c3 8131 case IORING_OP_FSYNC:
1155c76a 8132 return io_fsync_prep(req, sqe);
8ed8d3c3 8133 case IORING_OP_SYNC_FILE_RANGE:
1155c76a 8134 return io_sfr_prep(req, sqe);
03b1230c 8135 case IORING_OP_SENDMSG:
fddaface 8136 case IORING_OP_SEND:
bfe76559 8137 return io_sendmsg_prep(req, sqe);
03b1230c 8138 case IORING_OP_RECVMSG:
fddaface 8139 case IORING_OP_RECV:
bfe76559 8140 return io_recvmsg_prep(req, sqe);
f499a021 8141 case IORING_OP_CONNECT:
bfe76559 8142 return io_connect_prep(req, sqe);
2d28390a 8143 case IORING_OP_TIMEOUT:
ecddc25d 8144 return io_timeout_prep(req, sqe);
b29472ee 8145 case IORING_OP_TIMEOUT_REMOVE:
bfe76559 8146 return io_timeout_remove_prep(req, sqe);
fbf23849 8147 case IORING_OP_ASYNC_CANCEL:
bfe76559 8148 return io_async_cancel_prep(req, sqe);
2d28390a 8149 case IORING_OP_LINK_TIMEOUT:
ecddc25d 8150 return io_link_timeout_prep(req, sqe);
8ed8d3c3 8151 case IORING_OP_ACCEPT:
bfe76559 8152 return io_accept_prep(req, sqe);
d63d1b5e 8153 case IORING_OP_FALLOCATE:
bfe76559 8154 return io_fallocate_prep(req, sqe);
15b71abe 8155 case IORING_OP_OPENAT:
bfe76559 8156 return io_openat_prep(req, sqe);
b5dba59e 8157 case IORING_OP_CLOSE:
bfe76559 8158 return io_close_prep(req, sqe);
05f3fb3c 8159 case IORING_OP_FILES_UPDATE:
54739cc6 8160 return io_files_update_prep(req, sqe);
eddc7ef5 8161 case IORING_OP_STATX:
bfe76559 8162 return io_statx_prep(req, sqe);
4840e418 8163 case IORING_OP_FADVISE:
bfe76559 8164 return io_fadvise_prep(req, sqe);
c1ca757b 8165 case IORING_OP_MADVISE:
bfe76559 8166 return io_madvise_prep(req, sqe);
cebdb986 8167 case IORING_OP_OPENAT2:
bfe76559 8168 return io_openat2_prep(req, sqe);
3e4827b0 8169 case IORING_OP_EPOLL_CTL:
bfe76559 8170 return io_epoll_ctl_prep(req, sqe);
7d67af2c 8171 case IORING_OP_SPLICE:
bfe76559 8172 return io_splice_prep(req, sqe);
ddf0322d 8173 case IORING_OP_PROVIDE_BUFFERS:
bfe76559 8174 return io_provide_buffers_prep(req, sqe);
067524e9 8175 case IORING_OP_REMOVE_BUFFERS:
bfe76559 8176 return io_remove_buffers_prep(req, sqe);
f2a8d5c7 8177 case IORING_OP_TEE:
bfe76559 8178 return io_tee_prep(req, sqe);
36f4fa68
JA
8179 case IORING_OP_SHUTDOWN:
8180 return io_shutdown_prep(req, sqe);
80a261fd
JA
8181 case IORING_OP_RENAMEAT:
8182 return io_renameat_prep(req, sqe);
14a1143b
JA
8183 case IORING_OP_UNLINKAT:
8184 return io_unlinkat_prep(req, sqe);
e34a02dc
DK
8185 case IORING_OP_MKDIRAT:
8186 return io_mkdirat_prep(req, sqe);
7a8721f8
DK
8187 case IORING_OP_SYMLINKAT:
8188 return io_symlinkat_prep(req, sqe);
cf30da90
DK
8189 case IORING_OP_LINKAT:
8190 return io_linkat_prep(req, sqe);
4f57f06c
JA
8191 case IORING_OP_MSG_RING:
8192 return io_msg_ring_prep(req, sqe);
e9621e2b
SR
8193 case IORING_OP_FSETXATTR:
8194 return io_fsetxattr_prep(req, sqe);
8195 case IORING_OP_SETXATTR:
8196 return io_setxattr_prep(req, sqe);
a56834e0
SR
8197 case IORING_OP_FGETXATTR:
8198 return io_fgetxattr_prep(req, sqe);
8199 case IORING_OP_GETXATTR:
8200 return io_getxattr_prep(req, sqe);
1374e08e
JA
8201 case IORING_OP_SOCKET:
8202 return io_socket_prep(req, sqe);
ee692a21
JA
8203 case IORING_OP_URING_CMD:
8204 return io_uring_cmd_prep(req, sqe);
f67676d1
JA
8205 }
8206
bfe76559
PB
8207 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
8208 req->opcode);
bd54b6fe 8209 return -EINVAL;
bfe76559
PB
8210}
8211
93642ef8 8212static int io_req_prep_async(struct io_kiocb *req)
bfe76559 8213{
a196c78b
JA
8214 const struct io_op_def *def = &io_op_defs[req->opcode];
8215
8216 /* assign early for deferred execution for non-fixed file */
8217 if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE))
3a166bdb 8218 req->file = io_file_get_normal(req, req->cqe.fd);
a196c78b 8219 if (!def->needs_async_setup)
b7e298d2 8220 return 0;
d886e185 8221 if (WARN_ON_ONCE(req_has_async_data(req)))
b7e298d2
PB
8222 return -EFAULT;
8223 if (io_alloc_async_data(req))
8224 return -EAGAIN;
8225
93642ef8
PB
8226 switch (req->opcode) {
8227 case IORING_OP_READV:
157dc813 8228 return io_readv_prep_async(req);
93642ef8 8229 case IORING_OP_WRITEV:
157dc813 8230 return io_writev_prep_async(req);
93642ef8 8231 case IORING_OP_SENDMSG:
93642ef8
PB
8232 return io_sendmsg_prep_async(req);
8233 case IORING_OP_RECVMSG:
93642ef8
PB
8234 return io_recvmsg_prep_async(req);
8235 case IORING_OP_CONNECT:
8236 return io_connect_prep_async(req);
ee692a21
JA
8237 case IORING_OP_URING_CMD:
8238 return io_uring_cmd_prep_async(req);
93642ef8 8239 }
b7e298d2
PB
8240 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
8241 req->opcode);
8242 return -EFAULT;
f67676d1
JA
8243}
8244
9cf7c104
PB
8245static u32 io_get_sequence(struct io_kiocb *req)
8246{
a3dbdf54 8247 u32 seq = req->ctx->cached_sq_head;
963c6abb 8248 struct io_kiocb *cur;
9cf7c104 8249
a3dbdf54 8250 /* need original cached_sq_head, but it was increased for each req */
963c6abb 8251 io_for_each_link(cur, req)
a3dbdf54
PB
8252 seq--;
8253 return seq;
9cf7c104
PB
8254}
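/*
 * Worked example for the helper above: cached_sq_head is bumped once per
 * SQE, so after submitting a 3-request link starting at head N it reads
 * N + 3; walking the link with io_for_each_link() decrements once per
 * request and recovers N, the sequence the head of the link was submitted at.
 */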
8255
c072481d 8256static __cold void io_drain_req(struct io_kiocb *req)
de0617e4 8257{
a197f664 8258 struct io_ring_ctx *ctx = req->ctx;
27dc8338 8259 struct io_defer_entry *de;
f67676d1 8260 int ret;
e0eb71dc 8261 u32 seq = io_get_sequence(req);
3c19966d 8262
9d858b21 8263 /* Still need defer if there is a pending req in the defer list. */
e302f104 8264 spin_lock(&ctx->completion_lock);
5e371265 8265 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
e302f104 8266 spin_unlock(&ctx->completion_lock);
e0eb71dc 8267queue:
10c66904 8268 ctx->drain_active = false;
e0eb71dc
PB
8269 io_req_task_queue(req);
8270 return;
10c66904 8271 }
e302f104 8272 spin_unlock(&ctx->completion_lock);
9cf7c104 8273
b7e298d2 8274 ret = io_req_prep_async(req);
e0eb71dc
PB
8275 if (ret) {
8276fail:
8277 io_req_complete_failed(req, ret);
8278 return;
8279 }
cbdcb435 8280 io_prep_async_link(req);
27dc8338 8281 de = kmalloc(sizeof(*de), GFP_KERNEL);
76cc33d7 8282 if (!de) {
1b48773f 8283 ret = -ENOMEM;
e0eb71dc 8284 goto fail;
76cc33d7 8285 }
2d28390a 8286
79ebeaee 8287 spin_lock(&ctx->completion_lock);
9cf7c104 8288 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
79ebeaee 8289 spin_unlock(&ctx->completion_lock);
27dc8338 8290 kfree(de);
e0eb71dc 8291 goto queue;
de0617e4
JA
8292 }
8293
cef216fc 8294 trace_io_uring_defer(ctx, req, req->cqe.user_data, req->opcode);
27dc8338 8295 de->req = req;
9cf7c104 8296 de->seq = seq;
27dc8338 8297 list_add_tail(&de->list, &ctx->defer_list);
79ebeaee 8298 spin_unlock(&ctx->completion_lock);
de0617e4
JA
8299}
8300
68fb8979 8301static void io_clean_op(struct io_kiocb *req)
99bc4c38 8302{
8197b053
PB
8303 if (req->flags & REQ_F_BUFFER_SELECTED) {
8304 spin_lock(&req->ctx->completion_lock);
cc3cec83 8305 io_put_kbuf_comp(req);
8197b053
PB
8306 spin_unlock(&req->ctx->completion_lock);
8307 }
99bc4c38 8308
0e1b6fe3
PB
8309 if (req->flags & REQ_F_NEED_CLEANUP) {
8310 switch (req->opcode) {
8311 case IORING_OP_READV:
8312 case IORING_OP_READ_FIXED:
8313 case IORING_OP_READ:
8314 case IORING_OP_WRITEV:
8315 case IORING_OP_WRITE_FIXED:
e8c2bc1f
JA
8316 case IORING_OP_WRITE: {
8317 struct io_async_rw *io = req->async_data;
1dacb4df
PB
8318
8319 kfree(io->free_iovec);
0e1b6fe3 8320 break;
e8c2bc1f 8321 }
0e1b6fe3 8322 case IORING_OP_RECVMSG:
e8c2bc1f
JA
8323 case IORING_OP_SENDMSG: {
8324 struct io_async_msghdr *io = req->async_data;
257e84a5
PB
8325
8326 kfree(io->free_iov);
0e1b6fe3 8327 break;
e8c2bc1f 8328 }
f3cd4850
JA
8329 case IORING_OP_OPENAT:
8330 case IORING_OP_OPENAT2:
8331 if (req->open.filename)
8332 putname(req->open.filename);
8333 break;
80a261fd
JA
8334 case IORING_OP_RENAMEAT:
8335 putname(req->rename.oldpath);
8336 putname(req->rename.newpath);
8337 break;
14a1143b
JA
8338 case IORING_OP_UNLINKAT:
8339 putname(req->unlink.filename);
8340 break;
e34a02dc
DK
8341 case IORING_OP_MKDIRAT:
8342 putname(req->mkdir.filename);
8343 break;
7a8721f8
DK
8344 case IORING_OP_SYMLINKAT:
8345 putname(req->symlink.oldpath);
8346 putname(req->symlink.newpath);
8347 break;
cf30da90
DK
8348 case IORING_OP_LINKAT:
8349 putname(req->hardlink.oldpath);
8350 putname(req->hardlink.newpath);
8351 break;
1b6fe6e0
SR
8352 case IORING_OP_STATX:
8353 if (req->statx.filename)
8354 putname(req->statx.filename);
8355 break;
e9621e2b
SR
8356 case IORING_OP_SETXATTR:
8357 case IORING_OP_FSETXATTR:
a56834e0
SR
8358 case IORING_OP_GETXATTR:
8359 case IORING_OP_FGETXATTR:
e9621e2b
SR
8360 __io_xattr_finish(req);
8361 break;
0e1b6fe3 8362 }
99bc4c38 8363 }
75652a30
JA
8364 if ((req->flags & REQ_F_POLLED) && req->apoll) {
8365 kfree(req->apoll->double_poll);
8366 kfree(req->apoll);
8367 req->apoll = NULL;
8368 }
9cae36a0
JA
8369 if (req->flags & REQ_F_INFLIGHT) {
8370 struct io_uring_task *tctx = req->task->io_uring;
8371
8372 atomic_dec(&tctx->inflight_tracked);
8373 }
c854357b 8374 if (req->flags & REQ_F_CREDS)
b8e64b53 8375 put_cred(req->creds);
d886e185
PB
8376 if (req->flags & REQ_F_ASYNC_DATA) {
8377 kfree(req->async_data);
8378 req->async_data = NULL;
8379 }
c854357b 8380 req->flags &= ~IO_REQ_CLEAN_FLAGS;
99bc4c38
PB
8381}
8382
6bf9c47a
JA
8383static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
8384{
8385 if (req->file || !io_op_defs[req->opcode].needs_file)
8386 return true;
8387
8388 if (req->flags & REQ_F_FIXED_FILE)
cef216fc 8389 req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
6bf9c47a 8390 else
cef216fc 8391 req->file = io_file_get_normal(req, req->cqe.fd);
6bf9c47a 8392
772f5e00 8393 return !!req->file;
6bf9c47a
JA
8394}
8395
889fca73 8396static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1 8397{
fcde59fe 8398 const struct io_op_def *def = &io_op_defs[req->opcode];
5730b27e 8399 const struct cred *creds = NULL;
d625c6ee 8400 int ret;
2b188cc1 8401
70152140
JA
8402 if (unlikely(!io_assign_file(req, issue_flags)))
8403 return -EBADF;
8404
6878b40e 8405 if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
c10d1f98 8406 creds = override_creds(req->creds);
5730b27e 8407
fcde59fe 8408 if (!def->audit_skip)
5bd2182d
PM
8409 audit_uring_entry(req->opcode);
8410
d625c6ee 8411 switch (req->opcode) {
2b188cc1 8412 case IORING_OP_NOP:
889fca73 8413 ret = io_nop(req, issue_flags);
2b188cc1
JA
8414 break;
8415 case IORING_OP_READV:
edafccee 8416 case IORING_OP_READ_FIXED:
3a6820f2 8417 case IORING_OP_READ:
889fca73 8418 ret = io_read(req, issue_flags);
edafccee 8419 break;
3529d8c2 8420 case IORING_OP_WRITEV:
edafccee 8421 case IORING_OP_WRITE_FIXED:
3a6820f2 8422 case IORING_OP_WRITE:
889fca73 8423 ret = io_write(req, issue_flags);
2b188cc1 8424 break;
c992fe29 8425 case IORING_OP_FSYNC:
45d189c6 8426 ret = io_fsync(req, issue_flags);
c992fe29 8427 break;
221c5eb2 8428 case IORING_OP_POLL_ADD:
61e98203 8429 ret = io_poll_add(req, issue_flags);
221c5eb2
JA
8430 break;
8431 case IORING_OP_POLL_REMOVE:
54739cc6 8432 ret = io_poll_remove(req, issue_flags);
221c5eb2 8433 break;
5d17b4a4 8434 case IORING_OP_SYNC_FILE_RANGE:
45d189c6 8435 ret = io_sync_file_range(req, issue_flags);
5d17b4a4 8436 break;
0fa03c62 8437 case IORING_OP_SENDMSG:
889fca73 8438 ret = io_sendmsg(req, issue_flags);
062d04d7 8439 break;
fddaface 8440 case IORING_OP_SEND:
889fca73 8441 ret = io_send(req, issue_flags);
0fa03c62 8442 break;
aa1fa28f 8443 case IORING_OP_RECVMSG:
889fca73 8444 ret = io_recvmsg(req, issue_flags);
062d04d7 8445 break;
fddaface 8446 case IORING_OP_RECV:
889fca73 8447 ret = io_recv(req, issue_flags);
aa1fa28f 8448 break;
5262f567 8449 case IORING_OP_TIMEOUT:
61e98203 8450 ret = io_timeout(req, issue_flags);
5262f567 8451 break;
11365043 8452 case IORING_OP_TIMEOUT_REMOVE:
61e98203 8453 ret = io_timeout_remove(req, issue_flags);
11365043 8454 break;
17f2fe35 8455 case IORING_OP_ACCEPT:
889fca73 8456 ret = io_accept(req, issue_flags);
17f2fe35 8457 break;
f8e85cf2 8458 case IORING_OP_CONNECT:
889fca73 8459 ret = io_connect(req, issue_flags);
f8e85cf2 8460 break;
62755e35 8461 case IORING_OP_ASYNC_CANCEL:
61e98203 8462 ret = io_async_cancel(req, issue_flags);
62755e35 8463 break;
d63d1b5e 8464 case IORING_OP_FALLOCATE:
45d189c6 8465 ret = io_fallocate(req, issue_flags);
d63d1b5e 8466 break;
15b71abe 8467 case IORING_OP_OPENAT:
45d189c6 8468 ret = io_openat(req, issue_flags);
15b71abe 8469 break;
b5dba59e 8470 case IORING_OP_CLOSE:
889fca73 8471 ret = io_close(req, issue_flags);
b5dba59e 8472 break;
05f3fb3c 8473 case IORING_OP_FILES_UPDATE:
889fca73 8474 ret = io_files_update(req, issue_flags);
05f3fb3c 8475 break;
eddc7ef5 8476 case IORING_OP_STATX:
45d189c6 8477 ret = io_statx(req, issue_flags);
eddc7ef5 8478 break;
4840e418 8479 case IORING_OP_FADVISE:
45d189c6 8480 ret = io_fadvise(req, issue_flags);
4840e418 8481 break;
c1ca757b 8482 case IORING_OP_MADVISE:
45d189c6 8483 ret = io_madvise(req, issue_flags);
c1ca757b 8484 break;
cebdb986 8485 case IORING_OP_OPENAT2:
45d189c6 8486 ret = io_openat2(req, issue_flags);
cebdb986 8487 break;
3e4827b0 8488 case IORING_OP_EPOLL_CTL:
889fca73 8489 ret = io_epoll_ctl(req, issue_flags);
3e4827b0 8490 break;
7d67af2c 8491 case IORING_OP_SPLICE:
45d189c6 8492 ret = io_splice(req, issue_flags);
7d67af2c 8493 break;
ddf0322d 8494 case IORING_OP_PROVIDE_BUFFERS:
889fca73 8495 ret = io_provide_buffers(req, issue_flags);
ddf0322d 8496 break;
067524e9 8497 case IORING_OP_REMOVE_BUFFERS:
889fca73 8498 ret = io_remove_buffers(req, issue_flags);
3e4827b0 8499 break;
f2a8d5c7 8500 case IORING_OP_TEE:
45d189c6 8501 ret = io_tee(req, issue_flags);
f2a8d5c7 8502 break;
36f4fa68 8503 case IORING_OP_SHUTDOWN:
45d189c6 8504 ret = io_shutdown(req, issue_flags);
36f4fa68 8505 break;
80a261fd 8506 case IORING_OP_RENAMEAT:
45d189c6 8507 ret = io_renameat(req, issue_flags);
80a261fd 8508 break;
14a1143b 8509 case IORING_OP_UNLINKAT:
45d189c6 8510 ret = io_unlinkat(req, issue_flags);
14a1143b 8511 break;
e34a02dc
DK
8512 case IORING_OP_MKDIRAT:
8513 ret = io_mkdirat(req, issue_flags);
8514 break;
7a8721f8
DK
8515 case IORING_OP_SYMLINKAT:
8516 ret = io_symlinkat(req, issue_flags);
8517 break;
cf30da90
DK
8518 case IORING_OP_LINKAT:
8519 ret = io_linkat(req, issue_flags);
8520 break;
4f57f06c
JA
8521 case IORING_OP_MSG_RING:
8522 ret = io_msg_ring(req, issue_flags);
8523 break;
e9621e2b
SR
8524 case IORING_OP_FSETXATTR:
8525 ret = io_fsetxattr(req, issue_flags);
8526 break;
8527 case IORING_OP_SETXATTR:
8528 ret = io_setxattr(req, issue_flags);
8529 break;
a56834e0
SR
8530 case IORING_OP_FGETXATTR:
8531 ret = io_fgetxattr(req, issue_flags);
8532 break;
8533 case IORING_OP_GETXATTR:
8534 ret = io_getxattr(req, issue_flags);
8535 break;
1374e08e
JA
8536 case IORING_OP_SOCKET:
8537 ret = io_socket(req, issue_flags);
8538 break;
ee692a21
JA
8539 case IORING_OP_URING_CMD:
8540 ret = io_uring_cmd(req, issue_flags);
8541 break;
2b188cc1
JA
8542 default:
8543 ret = -EINVAL;
8544 break;
8545 }
8546
fcde59fe 8547 if (!def->audit_skip)
5bd2182d
PM
8548 audit_uring_exit(!ret, ret);
8549
5730b27e
JA
8550 if (creds)
8551 revert_creds(creds);
def596e9
JA
8552 if (ret)
8553 return ret;
b532576e 8554 /* If the op doesn't have a file, we're not polling for it */
9983028e 8555 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file)
9882131c 8556 io_iopoll_req_issued(req, issue_flags);
def596e9
JA
8557
8558 return 0;
2b188cc1
JA
8559}
8560
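/*
 * Illustrative userspace sketch (not part of this file): the switch in
 * io_issue_sqe() above is driven purely by sqe->opcode, so any prepared
 * SQE lands in the matching io_*() handler. A minimal liburing example,
 * assuming liburing is available; error handling is elided.
 */
#include <liburing.h>
#include <fcntl.h>

static int submit_nop_and_openat(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int i;

        if (io_uring_queue_init(8, &ring, 0))
                return -1;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);                 /* IORING_OP_NOP */
        sqe->user_data = 1;

        sqe = io_uring_get_sqe(&ring);
        /* IORING_OP_OPENAT, dispatched to io_openat() above */
        io_uring_prep_openat(sqe, AT_FDCWD, "/etc/hostname", O_RDONLY, 0);
        sqe->user_data = 2;

        io_uring_submit(&ring);
        for (i = 0; i < 2; i++) {
                io_uring_wait_cqe(&ring, &cqe);
                /* cqe->res carries the handler's return value (fd or -errno) */
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        return 0;
}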
ebc11b6c
PB
8561static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
8562{
8563 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8564
8565 req = io_put_req_find_next(req);
8566 return req ? &req->work : NULL;
8567}
8568
5280f7e5 8569static void io_wq_submit_work(struct io_wq_work *work)
2b188cc1
JA
8570{
8571 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6bf9c47a 8572 const struct io_op_def *def = &io_op_defs[req->opcode];
d01905db
PB
8573 unsigned int issue_flags = IO_URING_F_UNLOCKED;
8574 bool needs_poll = false;
6bf9c47a 8575 int ret = 0, err = -ECANCELED;
2b188cc1 8576
48dcd38d
PB
8577 /* one will be dropped by ->io_free_work() after returning to io-wq */
8578 if (!(req->flags & REQ_F_REFCOUNT))
8579 __io_req_set_refcount(req, 2);
8580 else
8581 req_ref_get(req);
5d5901a3 8582
cb2d344c 8583 io_arm_ltimeout(req);
6bf9c47a 8584
dadebc35 8585 /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
d01905db 8586 if (work->flags & IO_WQ_WORK_CANCEL) {
0f8da75b 8587fail:
6bf9c47a 8588 io_req_task_queue_fail(req, err);
d01905db
PB
8589 return;
8590 }
0f8da75b
PB
8591 if (!io_assign_file(req, issue_flags)) {
8592 err = -EBADF;
8593 work->flags |= IO_WQ_WORK_CANCEL;
8594 goto fail;
8595 }
31b51510 8596
d01905db 8597 if (req->flags & REQ_F_FORCE_ASYNC) {
afb7f56f
PB
8598 bool opcode_poll = def->pollin || def->pollout;
8599
8600 if (opcode_poll && file_can_poll(req->file)) {
8601 needs_poll = true;
d01905db 8602 issue_flags |= IO_URING_F_NONBLOCK;
afb7f56f 8603 }
561fb04a 8604 }
31b51510 8605
d01905db
PB
8606 do {
8607 ret = io_issue_sqe(req, issue_flags);
8608 if (ret != -EAGAIN)
8609 break;
8610 /*
8611 * We can get EAGAIN for iopolled IO even though we're
8612 * forcing a sync submission from here, since we can't
8613 * wait for request slots on the block side.
8614 */
8615 if (!needs_poll) {
e0deb6a0
PB
8616 if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
8617 break;
d01905db
PB
8618 cond_resched();
8619 continue;
90fa0288
HX
8620 }
8621
4d9237e3 8622 if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
d01905db
PB
8623 return;
8624 /* aborted or ready, in either case retry blocking */
8625 needs_poll = false;
8626 issue_flags &= ~IO_URING_F_NONBLOCK;
8627 } while (1);
31b51510 8628
a3df7698 8629 /* avoid locking problems by failing it from a clean context */
5d5901a3 8630 if (ret)
a3df7698 8631 io_req_task_queue_fail(req, ret);
2b188cc1
JA
8632}
8633
aeca241b 8634static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
042b0d85 8635 unsigned i)
65e19f54 8636{
042b0d85 8637 return &table->files[i];
dafecf19
PB
8638}
8639
65e19f54
JA
8640static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
8641 int index)
8642{
aeca241b 8643 struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
65e19f54 8644
a04b0ac0 8645 return (struct file *) (slot->file_ptr & FFS_MASK);
65e19f54
JA
8646}
8647
a04b0ac0 8648static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
9a321c98
PB
8649{
8650 unsigned long file_ptr = (unsigned long) file;
8651
88459b50 8652 file_ptr |= io_file_get_flags(file);
a04b0ac0 8653 file_slot->file_ptr = file_ptr;
65e19f54
JA
8654}
8655
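/*
 * Side note with a self-contained sketch of the trick used by
 * io_fixed_file_set()/io_file_from_index() above: struct file pointers are
 * at least 8-byte aligned, so the low bits are free to carry per-file flags,
 * and a mask (FFS_MASK here) recovers the pointer. The names below
 * (TAG_BITS, tag_ptr, untag_ptr, ptr_tags) are made up for illustration.
 */
#define TAG_BITS        3UL
#define TAG_MASK        ((1UL << TAG_BITS) - 1)

static inline unsigned long tag_ptr(void *ptr, unsigned long tags)
{
        /* the low bits must be clear in the original, aligned pointer */
        return (unsigned long)ptr | (tags & TAG_MASK);
}

static inline void *untag_ptr(unsigned long tagged)
{
        return (void *)(tagged & ~TAG_MASK);
}

static inline unsigned long ptr_tags(unsigned long tagged)
{
        return tagged & TAG_MASK;
}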
5106dd6e
JA
8656static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
8657 unsigned int issue_flags)
09bb8394 8658{
5106dd6e
JA
8659 struct io_ring_ctx *ctx = req->ctx;
8660 struct file *file = NULL;
ac177053 8661 unsigned long file_ptr;
09bb8394 8662
93f052cb 8663 io_ring_submit_lock(ctx, issue_flags);
5106dd6e 8664
ac177053 8665 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
5106dd6e 8666 goto out;
ac177053
PB
8667 fd = array_index_nospec(fd, ctx->nr_user_files);
8668 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
8669 file = (struct file *) (file_ptr & FFS_MASK);
8670 file_ptr &= ~FFS_MASK;
8671 /* mask in overlapping REQ_F and FFS bits */
35645ac3 8672 req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
5106dd6e 8673 io_req_set_rsrc_node(req, ctx, 0);
d78bd8ad 8674 WARN_ON_ONCE(file && !test_bit(fd, ctx->file_table.bitmap));
5106dd6e 8675out:
93f052cb 8676 io_ring_submit_unlock(ctx, issue_flags);
ac177053
PB
8677 return file;
8678}
d44f554e 8679
5106dd6e 8680static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
ac177053 8681{
62906e89 8682 struct file *file = fget(fd);
ac177053 8683
cef216fc 8684 trace_io_uring_file_get(req->ctx, req, req->cqe.user_data, fd);
09bb8394 8685
ac177053 8686 /* we don't allow fixed io_uring files */
d5361233 8687 if (file && file->f_op == &io_uring_fops)
9cae36a0 8688 io_req_track_inflight(req);
8371adf5 8689 return file;
09bb8394
JA
8690}
8691
f237c30a 8692static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
89b263f6
JA
8693{
8694 struct io_kiocb *prev = req->timeout.prev;
617a8948 8695 int ret = -ENOENT;
89b263f6
JA
8696
8697 if (prev) {
b21432b4
JA
8698 if (!(req->task->flags & PF_EXITING)) {
8699 struct io_cancel_data cd = {
8700 .ctx = req->ctx,
8701 .data = prev->cqe.user_data,
8702 };
8703
8704 ret = io_try_cancel(req, &cd);
8705 }
505657bc 8706 io_req_complete_post(req, ret ?: -ETIME, 0);
89b263f6 8707 io_put_req(prev);
89b263f6
JA
8708 } else {
8709 io_req_complete_post(req, -ETIME, 0);
8710 }
8711}
8712
2665abfd 8713static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2b188cc1 8714{
ad8a48ac
JA
8715 struct io_timeout_data *data = container_of(timer,
8716 struct io_timeout_data, timer);
90cd7e42 8717 struct io_kiocb *prev, *req = data->req;
2665abfd 8718 struct io_ring_ctx *ctx = req->ctx;
2665abfd 8719 unsigned long flags;
2665abfd 8720
89b263f6 8721 spin_lock_irqsave(&ctx->timeout_lock, flags);
90cd7e42
PB
8722 prev = req->timeout.head;
8723 req->timeout.head = NULL;
2665abfd
JA
8724
8725 /*
8726 * We don't expect the list to be empty, that will only happen if we
8727 * race with the completion of the linked work.
8728 */
447c19f3 8729 if (prev) {
f2f87370 8730 io_remove_next_linked(prev);
447c19f3
PB
8731 if (!req_ref_inc_not_zero(prev))
8732 prev = NULL;
8733 }
ef9dd637 8734 list_del(&req->timeout.list);
89b263f6
JA
8735 req->timeout.prev = prev;
8736 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
2665abfd 8737
89b263f6 8738 req->io_task_work.func = io_req_task_link_timeout;
3fe07bcd 8739 io_req_task_work_add(req);
2665abfd
JA
8740 return HRTIMER_NORESTART;
8741}
8742
de968c18 8743static void io_queue_linked_timeout(struct io_kiocb *req)
2665abfd 8744{
de968c18
PB
8745 struct io_ring_ctx *ctx = req->ctx;
8746
89b263f6 8747 spin_lock_irq(&ctx->timeout_lock);
76a46e06 8748 /*
f2f87370
PB
8749 * If the back reference is NULL, then our linked request finished
8750 * before we got a chance to set up the timer
76a46e06 8751 */
90cd7e42 8752 if (req->timeout.head) {
e8c2bc1f 8753 struct io_timeout_data *data = req->async_data;
94ae5e77 8754
ad8a48ac
JA
8755 data->timer.function = io_link_timeout_fn;
8756 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
8757 data->mode);
ef9dd637 8758 list_add_tail(&req->timeout.list, &ctx->ltimeout_list);
2665abfd 8759 }
89b263f6 8760 spin_unlock_irq(&ctx->timeout_lock);
2665abfd 8761 /* drop submission reference */
76a46e06
JA
8762 io_put_req(req);
8763}
2665abfd 8764
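/*
 * Illustrative userspace sketch (not part of this file): the linked-timeout
 * machinery above is driven by pairing an operation that has IOSQE_IO_LINK
 * set with a following IORING_OP_LINK_TIMEOUT SQE. A hedged liburing
 * example; assumes liburing and elides error handling.
 */
#include <liburing.h>

static void read_with_timeout(struct io_uring *ring, int fd,
                              void *buf, unsigned len)
{
        struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_read(sqe, fd, buf, len, 0);
        sqe->flags |= IOSQE_IO_LINK;    /* the timeout applies to this op */
        sqe->user_data = 1;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_link_timeout(sqe, &ts, 0);
        sqe->user_data = 2;

        io_uring_submit(ring);
        /* if the read doesn't finish within 1s, it completes with -ECANCELED */
}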
7bfa9bad 8765static void io_queue_async(struct io_kiocb *req, int ret)
d475a9a6
PB
8766 __must_hold(&req->ctx->uring_lock)
8767{
7bfa9bad
PB
8768 struct io_kiocb *linked_timeout;
8769
8770 if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
8771 io_req_complete_failed(req, ret);
8772 return;
8773 }
8774
8775 linked_timeout = io_prep_linked_timeout(req);
d475a9a6 8776
4d9237e3 8777 switch (io_arm_poll_handler(req, 0)) {
d475a9a6 8778 case IO_APOLL_READY:
d475a9a6
PB
8779 io_req_task_queue(req);
8780 break;
8781 case IO_APOLL_ABORTED:
8782 /*
8783 * Queued up for async execution, worker will release
8784 * submit reference when the iocb is actually submitted.
8785 */
77955efb 8786 io_queue_iowq(req, NULL);
d475a9a6 8787 break;
b1c62645 8788 case IO_APOLL_OK:
b1c62645 8789 break;
d475a9a6
PB
8790 }
8791
8792 if (linked_timeout)
8793 io_queue_linked_timeout(linked_timeout);
8794}
8795
cbc2e203 8796static inline void io_queue_sqe(struct io_kiocb *req)
282cdc86 8797 __must_hold(&req->ctx->uring_lock)
2b188cc1 8798{
e0c5c576 8799 int ret;
2b188cc1 8800
c5eef2b9 8801 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
193155c8 8802
fff4e40e
PB
8803 if (req->flags & REQ_F_COMPLETE_INLINE) {
8804 io_req_add_compl_list(req);
d9f9d284 8805 return;
fff4e40e 8806 }
491381ce
JA
8807 /*
8808 * We async punt it if the file wasn't marked NOWAIT, or if the file
8809 * doesn't support non-blocking read/write attempts
8810 */
7bfa9bad 8811 if (likely(!ret))
cb2d344c 8812 io_arm_ltimeout(req);
7bfa9bad
PB
8813 else
8814 io_queue_async(req, ret);
2b188cc1
JA
8815}
8816
4652fe3f 8817static void io_queue_sqe_fallback(struct io_kiocb *req)
282cdc86 8818 __must_hold(&req->ctx->uring_lock)
4fe2c963 8819{
17b147f6
PB
8820 if (unlikely(req->flags & REQ_F_FAIL)) {
8821 /*
8822 * We don't submit; fail them all. For that, replace hardlinks
8823 * with normal links. An extra REQ_F_LINK is tolerated.
8824 */
8825 req->flags &= ~REQ_F_HARDLINK;
8826 req->flags |= REQ_F_LINK;
8827 io_req_complete_failed(req, req->cqe.res);
e0eb71dc
PB
8828 } else if (unlikely(req->ctx->drain_active)) {
8829 io_drain_req(req);
76cc33d7
PB
8830 } else {
8831 int ret = io_req_prep_async(req);
8832
8833 if (unlikely(ret))
8834 io_req_complete_failed(req, ret);
8835 else
77955efb 8836 io_queue_iowq(req, NULL);
ce35a47a 8837 }
4fe2c963
JL
8838}
8839
b16fed66
PB
8840/*
8841 * Check SQE restrictions (opcode and flags).
8842 *
8843 * Returns 'true' if SQE is allowed, 'false' otherwise.
8844 */
8845static inline bool io_check_restriction(struct io_ring_ctx *ctx,
8846 struct io_kiocb *req,
8847 unsigned int sqe_flags)
4fe2c963 8848{
b16fed66
PB
8849 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
8850 return false;
8851
8852 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
8853 ctx->restrictions.sqe_flags_required)
8854 return false;
8855
8856 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
8857 ctx->restrictions.sqe_flags_required))
8858 return false;
8859
8860 return true;
4fe2c963
JL
8861}
8862
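/*
 * Illustrative userspace sketch (not part of this file) of what feeds
 * io_check_restriction() above: a ring created with IORING_SETUP_R_DISABLED
 * can have an opcode/flag allow-list registered before it is enabled.
 * Assumes liburing provides io_uring_register_restrictions() and
 * io_uring_enable_rings(); error handling elided.
 */
#include <liburing.h>
#include <string.h>

static int setup_restricted_ring(struct io_uring *ring)
{
        struct io_uring_restriction res[2];

        memset(res, 0, sizeof(res));
        res[0].opcode = IORING_RESTRICTION_SQE_OP;
        res[0].sqe_op = IORING_OP_READ;         /* only reads allowed */
        res[1].opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED;
        res[1].sqe_flags = IOSQE_FIXED_FILE;

        if (io_uring_queue_init(8, ring, IORING_SETUP_R_DISABLED))
                return -1;
        if (io_uring_register_restrictions(ring, res, 2))
                return -1;
        /* from here on, io_check_restriction() rejects anything else */
        return io_uring_enable_rings(ring);
}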
22b2ca31
PB
8863static void io_init_req_drain(struct io_kiocb *req)
8864{
8865 struct io_ring_ctx *ctx = req->ctx;
8866 struct io_kiocb *head = ctx->submit_state.link.head;
8867
8868 ctx->drain_active = true;
8869 if (head) {
8870 /*
8871 * If we need to drain a request in the middle of a link, drain
8872 * the head request and the next request/link after the current
8873 * link. Considering sequential execution of links,
b6c7db32 8874 * REQ_F_IO_DRAIN will be maintained for every request of our
22b2ca31
PB
8875 * link.
8876 */
b6c7db32 8877 head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
22b2ca31
PB
8878 ctx->drain_next = true;
8879 }
8880}
8881
b16fed66
PB
8882static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
8883 const struct io_uring_sqe *sqe)
282cdc86 8884 __must_hold(&ctx->uring_lock)
b16fed66 8885{
fcde59fe 8886 const struct io_op_def *def;
b16fed66 8887 unsigned int sqe_flags;
fc0ae024 8888 int personality;
4a04d1d1 8889 u8 opcode;
b16fed66 8890
864ea921 8891 /* req is partially pre-initialised, see io_preinit_req() */
4a04d1d1 8892 req->opcode = opcode = READ_ONCE(sqe->opcode);
b16fed66
PB
8893 /* same numerical values as the corresponding REQ_F_*, safe to copy */
8894 req->flags = sqe_flags = READ_ONCE(sqe->flags);
cef216fc 8895 req->cqe.user_data = READ_ONCE(sqe->user_data);
b16fed66 8896 req->file = NULL;
c1bdf8ed 8897 req->rsrc_node = NULL;
b16fed66 8898 req->task = current;
b16fed66 8899
4a04d1d1
PB
8900 if (unlikely(opcode >= IORING_OP_LAST)) {
8901 req->opcode = 0;
b16fed66 8902 return -EINVAL;
4a04d1d1 8903 }
fcde59fe 8904 def = &io_op_defs[opcode];
68fe256a
PB
8905 if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
8906 /* enforce forwards compatibility on users */
8907 if (sqe_flags & ~SQE_VALID_FLAGS)
8908 return -EINVAL;
4e906702 8909 if (sqe_flags & IOSQE_BUFFER_SELECT) {
fcde59fe 8910 if (!def->buffer_select)
4e906702
JA
8911 return -EOPNOTSUPP;
8912 req->buf_index = READ_ONCE(sqe->buf_group);
8913 }
5562a8d7
PB
8914 if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
8915 ctx->drain_disabled = true;
8916 if (sqe_flags & IOSQE_IO_DRAIN) {
8917 if (ctx->drain_disabled)
8918 return -EOPNOTSUPP;
22b2ca31 8919 io_init_req_drain(req);
5562a8d7 8920 }
2a56a9bd
PB
8921 }
8922 if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
8923 if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
8924 return -EACCES;
8925 /* knock it to the slow queue path, will be drained there */
8926 if (ctx->drain_active)
8927 req->flags |= REQ_F_FORCE_ASYNC;
8928 /* if there is no link, we're at "next" request and need to drain */
8929 if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
8930 ctx->drain_next = false;
8931 ctx->drain_active = true;
b6c7db32 8932 req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2a56a9bd 8933 }
68fe256a 8934 }
b16fed66 8935
fcde59fe 8936 if (!def->ioprio && sqe->ioprio)
73911426 8937 return -EINVAL;
fcde59fe 8938 if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
73911426
JA
8939 return -EINVAL;
8940
fcde59fe 8941 if (def->needs_file) {
6d63416d
PB
8942 struct io_submit_state *state = &ctx->submit_state;
8943
cef216fc 8944 req->cqe.fd = READ_ONCE(sqe->fd);
6bf9c47a 8945
6d63416d
PB
8946 /*
8947 * Plug now if we have more than 2 IO left after this, and the
8948 * target is potentially a read/write to block-based storage.
8949 */
fcde59fe 8950 if (state->need_plug && def->plug) {
6d63416d
PB
8951 state->plug_started = true;
8952 state->need_plug = false;
5ca7a8b3 8953 blk_start_plug_nr_ios(&state->plug, state->submit_nr);
6d63416d 8954 }
b16fed66 8955 }
863e0560 8956
003e8dcc
JA
8957 personality = READ_ONCE(sqe->personality);
8958 if (personality) {
cdab10bf
LT
8959 int ret;
8960
c10d1f98
PB
8961 req->creds = xa_load(&ctx->personalities, personality);
8962 if (!req->creds)
003e8dcc 8963 return -EINVAL;
c10d1f98 8964 get_cred(req->creds);
cdc1404a
PM
8965 ret = security_uring_override_creds(req->creds);
8966 if (ret) {
8967 put_cred(req->creds);
8968 return ret;
8969 }
b8e64b53 8970 req->flags |= REQ_F_CREDS;
003e8dcc 8971 }
b16fed66 8972
fc0ae024 8973 return io_req_prep(req, sqe);
b16fed66
PB
8974}
8975
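/*
 * Illustrative userspace sketch (not part of this file): the
 * sqe->personality handling in io_init_req() above looks up credentials
 * registered earlier with IORING_REGISTER_PERSONALITY. Assumes liburing;
 * error handling elided.
 */
#include <liburing.h>

static void submit_as_registered_creds(struct io_uring *ring, int fd,
                                       void *buf, unsigned len)
{
        struct io_uring_sqe *sqe;
        int cred_id;

        /* snapshot the current credentials; returns an id for later SQEs */
        cred_id = io_uring_register_personality(ring);
        if (cred_id < 0)
                return;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_read(sqe, fd, buf, len, 0);
        sqe->personality = cred_id;     /* issue with the registered creds */
        io_uring_submit(ring);
}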
df3becde
PB
8976static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
8977 struct io_kiocb *req, int ret)
8978{
8979 struct io_ring_ctx *ctx = req->ctx;
8980 struct io_submit_link *link = &ctx->submit_state.link;
8981 struct io_kiocb *head = link->head;
8982
8983 trace_io_uring_req_failed(sqe, ctx, req, ret);
8984
8985 /*
8986 * Avoid breaking links in the middle as it renders links with SQPOLL
8987 * unusable. Instead of failing eagerly, continue assembling the link if
8988 * applicable and mark the head with REQ_F_FAIL. The link flushing code
8989 * should find the flag and handle the rest.
8990 */
8991 req_fail_link_node(req, ret);
8992 if (head && !(head->flags & REQ_F_FAIL))
8993 req_fail_link_node(head, -ECANCELED);
8994
8995 if (!(req->flags & IO_REQ_LINK_FLAGS)) {
8996 if (head) {
8997 link->last->link = req;
8998 link->head = NULL;
8999 req = head;
9000 }
9001 io_queue_sqe_fallback(req);
9002 return ret;
9003 }
9004
9005 if (head)
9006 link->last->link = req;
9007 else
9008 link->head = req;
9009 link->last = req;
9010 return 0;
9011}
9012
9013static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
a1ab7b35 9014 const struct io_uring_sqe *sqe)
282cdc86 9015 __must_hold(&ctx->uring_lock)
9e645e11 9016{
a1ab7b35 9017 struct io_submit_link *link = &ctx->submit_state.link;
ef4ff581 9018 int ret;
9e645e11 9019
a6b8cadc 9020 ret = io_init_req(ctx, req, sqe);
df3becde
PB
9021 if (unlikely(ret))
9022 return io_submit_fail_init(sqe, req, ret);
441b8a78 9023
be7053b7 9024 /* don't need @sqe from now on */
cef216fc 9025 trace_io_uring_submit_sqe(ctx, req, req->cqe.user_data, req->opcode,
236daeae
OL
9026 req->flags, true,
9027 ctx->flags & IORING_SETUP_SQPOLL);
a6b8cadc 9028
9e645e11
JA
9029 /*
9030 * If we already have a head request, queue this one for async
9031 * submittal once the head completes. If we don't have a head but
9032 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
9033 * submitted sync once the chain is complete. If none of those
9034 * conditions are true (normal request), then just queue it.
9035 */
924a07e4 9036 if (unlikely(link->head)) {
df3becde
PB
9037 ret = io_req_prep_async(req);
9038 if (unlikely(ret))
9039 return io_submit_fail_init(sqe, req, ret);
9040
9041 trace_io_uring_link(ctx, req, link->head);
f2f87370 9042 link->last->link = req;
863e0560 9043 link->last = req;
32fe525b 9044
da1a08c5 9045 if (req->flags & IO_REQ_LINK_FLAGS)
f15a3431 9046 return 0;
df3becde
PB
9047 /* last request of the link, flush it */
9048 req = link->head;
f15a3431 9049 link->head = NULL;
924a07e4
PB
9050 if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
9051 goto fallback;
9052
9053 } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
9054 REQ_F_FORCE_ASYNC | REQ_F_FAIL))) {
9055 if (req->flags & IO_REQ_LINK_FLAGS) {
9056 link->head = req;
9057 link->last = req;
9058 } else {
9059fallback:
9060 io_queue_sqe_fallback(req);
9061 }
f15a3431 9062 return 0;
9e645e11 9063 }
2e6e1fde 9064
f15a3431 9065 io_queue_sqe(req);
1d4240cc 9066 return 0;
9e645e11
JA
9067}
9068
9a56a232
JA
9069/*
9070 * Batched submission is done, ensure local IO is flushed out.
9071 */
553deffd 9072static void io_submit_state_end(struct io_ring_ctx *ctx)
9a56a232 9073{
553deffd
PB
9074 struct io_submit_state *state = &ctx->submit_state;
9075
e126391c
PB
9076 if (unlikely(state->link.head))
9077 io_queue_sqe_fallback(state->link.head);
553deffd 9078 /* flush only after queuing links as they can generate completions */
c450178d 9079 io_submit_flush_completions(ctx);
27926b68
JA
9080 if (state->plug_started)
9081 blk_finish_plug(&state->plug);
9a56a232
JA
9082}
9083
9084/*
9085 * Start submission side cache.
9086 */
9087static void io_submit_state_start(struct io_submit_state *state,
ba88ff11 9088 unsigned int max_ios)
9a56a232 9089{
27926b68 9090 state->plug_started = false;
4b628aeb 9091 state->need_plug = max_ios > 2;
5ca7a8b3 9092 state->submit_nr = max_ios;
a1ab7b35
PB
9093 /* set only head, no need to init link_last in advance */
9094 state->link.head = NULL;
9a56a232
JA
9095}
9096
2b188cc1
JA
9097static void io_commit_sqring(struct io_ring_ctx *ctx)
9098{
75b28aff 9099 struct io_rings *rings = ctx->rings;
2b188cc1 9100
caf582c6
PB
9101 /*
9102 * Ensure any loads from the SQEs are done at this point,
9103 * since once we write the new head, the application could
9104 * write new data to them.
9105 */
9106 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
9107}
9108
2b188cc1 9109/*
dd9ae8a0 9110 * Fetch an sqe, if one is available. Note this returns a pointer to memory
2b188cc1
JA
9111 * that is mapped by userspace. This means that care needs to be taken to
9112 * ensure that reads are stable, as we cannot rely on userspace always
9113 * being a good citizen. If members of the sqe are validated and then later
9114 * used, it's important that those reads are done through READ_ONCE() to
9115 * prevent a re-load down the line.
9116 */
709b302f 9117static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2b188cc1 9118{
ea5ab3b5 9119 unsigned head, mask = ctx->sq_entries - 1;
17d3aeb3 9120 unsigned sq_idx = ctx->cached_sq_head++ & mask;
2b188cc1
JA
9121
9122 /*
9123 * The cached sq head (or cq tail) serves two purposes:
9124 *
9125 * 1) allows us to batch the cost of updating the user-visible
9126 * head.
9127 * 2) allows the kernel side to track the head on its own, even
9128 * though the application is the one updating it.
9129 */
17d3aeb3 9130 head = READ_ONCE(ctx->sq_array[sq_idx]);
ebdeb7c0
JA
9131 if (likely(head < ctx->sq_entries)) {
9132 /* double index for 128-byte SQEs, twice as long */
9133 if (ctx->flags & IORING_SETUP_SQE128)
9134 head <<= 1;
709b302f 9135 return &ctx->sq_sqes[head];
ebdeb7c0 9136 }
2b188cc1
JA
9137
9138 /* drop invalid entries */
15641e42
PB
9139 ctx->cq_extra--;
9140 WRITE_ONCE(ctx->rings->sq_dropped,
9141 READ_ONCE(ctx->rings->sq_dropped) + 1);
709b302f
PB
9142 return NULL;
9143}
9144
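/*
 * Illustrative userspace sketch (not part of this file) of the producer
 * side that io_get_sqe() consumes: the application picks a free SQE slot,
 * fills it, writes its index into the sq_array indirection ring, and then
 * advances the tail. The struct below is a pseudo-layout; the field names
 * (tail, ring_mask, array, sqes) mirror the usual mmap'ed layout but are
 * assumptions here, and the tail publication details are omitted.
 */
struct app_sq {
        unsigned *tail;                 /* mapped kernel tail */
        unsigned *ring_mask;
        unsigned *array;                /* index indirection read by io_get_sqe() */
        struct io_uring_sqe *sqes;
};

static void app_queue_sqe(struct app_sq *sq, const struct io_uring_sqe *src)
{
        unsigned tail = *sq->tail;
        unsigned idx = tail & *sq->ring_mask;

        sq->sqes[idx] = *src;           /* fill the SQE slot */
        sq->array[idx] = idx;           /* what io_get_sqe() dereferences */
        *sq->tail = tail + 1;           /* publish (a release store in real code) */
}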
0f212204 9145static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
282cdc86 9146 __must_hold(&ctx->uring_lock)
6c271ce2 9147{
69629809 9148 unsigned int entries = io_sqring_entries(ctx);
8e6971a8
PB
9149 unsigned int left;
9150 int ret;
6c271ce2 9151
51d48dab 9152 if (unlikely(!entries))
69629809 9153 return 0;
ee7d46d9 9154 /* make sure SQ entry isn't read before tail */
8e6971a8
PB
9155 ret = left = min3(nr, ctx->sq_entries, entries);
9156 io_get_task_refs(left);
9157 io_submit_state_start(&ctx->submit_state, left);
6c271ce2 9158
69629809 9159 do {
3529d8c2 9160 const struct io_uring_sqe *sqe;
196be95c 9161 struct io_kiocb *req;
fb5ccc98 9162
8e6971a8 9163 if (unlikely(!io_alloc_req_refill(ctx)))
fb5ccc98 9164 break;
a33ae9ce 9165 req = io_alloc_req(ctx);
4fccfcbb
PB
9166 sqe = io_get_sqe(ctx);
9167 if (unlikely(!sqe)) {
fa05457a 9168 io_req_add_to_cache(req, ctx);
4fccfcbb
PB
9169 break;
9170 }
6c271ce2 9171
1cd15904
PB
9172 /*
9173 * Continue submitting even for sqe failure if the
9174 * ring was set up with IORING_SETUP_SUBMIT_ALL
9175 */
9176 if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
9177 !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
9178 left--;
9179 break;
bcbb7bf6 9180 }
1cd15904 9181 } while (--left);
9466f437 9182
8e6971a8
PB
9183 if (unlikely(left)) {
9184 ret -= left;
9185 /* try again if it submitted nothing and can't allocate a req */
9186 if (!ret && io_req_cache_empty(ctx))
9187 ret = -EAGAIN;
9188 current->io_uring->cached_refs += left;
9466f437 9189 }
6c271ce2 9190
553deffd 9191 io_submit_state_end(ctx);
ae9428ca
PB
9192 /* Commit SQ ring head once we've consumed and submitted all SQEs */
9193 io_commit_sqring(ctx);
8e6971a8 9194 return ret;
6c271ce2
JA
9195}
9196
e4b6d902
PB
9197static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
9198{
9199 return READ_ONCE(sqd->state);
9200}
9201
08369246 9202static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
6c271ce2 9203{
c8d1ba58 9204 unsigned int to_submit;
bdcd3eab 9205 int ret = 0;
6c271ce2 9206
c8d1ba58 9207 to_submit = io_sqring_entries(ctx);
e95eee2d 9208 /* if we're handling multiple rings, cap submit size for fairness */
4ce8ad95
OL
9209 if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
9210 to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
e95eee2d 9211
5eef4e87 9212 if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
948e1947
PB
9213 const struct cred *creds = NULL;
9214
9215 if (ctx->sq_creds != current_cred())
9216 creds = override_creds(ctx->sq_creds);
a4c0b3de 9217
c8d1ba58 9218 mutex_lock(&ctx->uring_lock);
5eef4e87 9219 if (!wq_list_empty(&ctx->iopoll_list))
5ba3c874 9220 io_do_iopoll(ctx, true);
906a3c6f 9221
3b763ba1
PB
9222 /*
9223 * Don't submit if refs are dying; that is good for io_uring_register(),
9224 * and io_ring_exit_work() relies on it as well
9225 */
0298ef96
PB
9226 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
9227 !(ctx->flags & IORING_SETUP_R_DISABLED))
08369246 9228 ret = io_submit_sqes(ctx, to_submit);
c8d1ba58 9229 mutex_unlock(&ctx->uring_lock);
cb318216 9230
acfb381d
PB
9231 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
9232 wake_up(&ctx->sqo_sq_wait);
948e1947
PB
9233 if (creds)
9234 revert_creds(creds);
acfb381d 9235 }
6c271ce2 9236
08369246
XW
9237 return ret;
9238}
6c271ce2 9239
c072481d 9240static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
08369246
XW
9241{
9242 struct io_ring_ctx *ctx;
9243 unsigned sq_thread_idle = 0;
6c271ce2 9244
c9dca27d
PB
9245 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
9246 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
08369246 9247 sqd->sq_thread_idle = sq_thread_idle;
c8d1ba58 9248}
6c271ce2 9249
e4b6d902
PB
9250static bool io_sqd_handle_event(struct io_sq_data *sqd)
9251{
9252 bool did_sig = false;
9253 struct ksignal ksig;
9254
9255 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
9256 signal_pending(current)) {
9257 mutex_unlock(&sqd->lock);
9258 if (signal_pending(current))
9259 did_sig = get_signal(&ksig);
9260 cond_resched();
9261 mutex_lock(&sqd->lock);
9262 }
e4b6d902
PB
9263 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
9264}
9265
c8d1ba58
JA
9266static int io_sq_thread(void *data)
9267{
69fb2131
JA
9268 struct io_sq_data *sqd = data;
9269 struct io_ring_ctx *ctx;
a0d9205f 9270 unsigned long timeout = 0;
37d1e2e3 9271 char buf[TASK_COMM_LEN];
08369246 9272 DEFINE_WAIT(wait);
6c271ce2 9273
696ee88a 9274 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
37d1e2e3 9275 set_task_comm(current, buf);
37d1e2e3
JA
9276
9277 if (sqd->sq_cpu != -1)
9278 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
9279 else
9280 set_cpus_allowed_ptr(current, cpu_online_mask);
9281 current->flags |= PF_NO_SETAFFINITY;
9282
5bd2182d
PM
9283 audit_alloc_kernel(current);
9284
09a6f4ef 9285 mutex_lock(&sqd->lock);
e4b6d902 9286 while (1) {
1a924a80 9287 bool cap_entries, sqt_spin = false;
c1edbf5f 9288
e4b6d902
PB
9289 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
9290 if (io_sqd_handle_event(sqd))
c7d95613 9291 break;
08369246
XW
9292 timeout = jiffies + sqd->sq_thread_idle;
9293 }
e4b6d902 9294
e95eee2d 9295 cap_entries = !list_is_singular(&sqd->ctx_list);
69fb2131 9296 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
948e1947 9297 int ret = __io_sq_thread(ctx, cap_entries);
7c30f36a 9298
5eef4e87 9299 if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
08369246 9300 sqt_spin = true;
69fb2131 9301 }
dd432ea5
PB
9302 if (io_run_task_work())
9303 sqt_spin = true;
6c271ce2 9304
08369246 9305 if (sqt_spin || !time_after(jiffies, timeout)) {
c8d1ba58 9306 cond_resched();
08369246
XW
9307 if (sqt_spin)
9308 timeout = jiffies + sqd->sq_thread_idle;
9309 continue;
9310 }
9311
08369246 9312 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
7f62d40d 9313 if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
1a924a80
PB
9314 bool needs_sched = true;
9315
724cb4f9 9316 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
3a4b89a2
JA
9317 atomic_or(IORING_SQ_NEED_WAKEUP,
9318 &ctx->rings->sq_flags);
724cb4f9 9319 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
5eef4e87 9320 !wq_list_empty(&ctx->iopoll_list)) {
724cb4f9
HX
9321 needs_sched = false;
9322 break;
9323 }
649bb75d
AK
9324
9325 /*
9326 * Ensure the store of the wakeup flag is not
9327 * reordered with the load of the SQ tail
9328 */
f2e030dd 9329 smp_mb__after_atomic();
649bb75d 9330
724cb4f9
HX
9331 if (io_sqring_entries(ctx)) {
9332 needs_sched = false;
9333 break;
9334 }
9335 }
9336
9337 if (needs_sched) {
9338 mutex_unlock(&sqd->lock);
9339 schedule();
9340 mutex_lock(&sqd->lock);
9341 }
69fb2131 9342 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
3a4b89a2
JA
9343 atomic_andnot(IORING_SQ_NEED_WAKEUP,
9344 &ctx->rings->sq_flags);
6c271ce2 9345 }
08369246
XW
9346
9347 finish_wait(&sqd->wait, &wait);
9348 timeout = jiffies + sqd->sq_thread_idle;
6c271ce2 9349 }
28cea78a 9350
78cc687b 9351 io_uring_cancel_generic(true, sqd);
37d1e2e3 9352 sqd->thread = NULL;
05962f95 9353 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
3a4b89a2 9354 atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
521d6a73 9355 io_run_task_work();
734551df
PB
9356 mutex_unlock(&sqd->lock);
9357
5bd2182d
PM
9358 audit_free(current);
9359
37d1e2e3
JA
9360 complete(&sqd->exited);
9361 do_exit(0);
6c271ce2
JA
9362}
9363
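/*
 * Illustrative userspace sketch (not part of this file): io_sq_thread()
 * above idles after sq_thread_idle and then sets IORING_SQ_NEED_WAKEUP, at
 * which point the application has to nudge it via io_uring_enter() with
 * IORING_ENTER_SQ_WAKEUP. With liburing, io_uring_submit() normally does
 * that flag check internally; the sketch below only shows the setup side
 * and is an assumption-laden example (error handling elided).
 */
#include <liburing.h>
#include <string.h>

static int setup_sqpoll_ring(struct io_uring *ring)
{
        struct io_uring_params p;

        memset(&p, 0, sizeof(p));
        p.flags = IORING_SETUP_SQPOLL;
        p.sq_thread_idle = 2000;        /* ms before the SQ thread goes idle */

        return io_uring_queue_init_params(64, ring, &p);
}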
bda52162
JA
9364struct io_wait_queue {
9365 struct wait_queue_entry wq;
9366 struct io_ring_ctx *ctx;
5fd46178 9367 unsigned cq_tail;
bda52162
JA
9368 unsigned nr_timeouts;
9369};
9370
6c503150 9371static inline bool io_should_wake(struct io_wait_queue *iowq)
bda52162
JA
9372{
9373 struct io_ring_ctx *ctx = iowq->ctx;
5fd46178 9374 int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;
bda52162
JA
9375
9376 /*
d195a66e 9377 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
9378 * started waiting. For timeouts, we always want to return to userspace,
9379 * regardless of event count.
9380 */
5fd46178 9381 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
bda52162
JA
9382}
9383
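/*
 * Side note with a self-contained sketch of the comparison trick in
 * io_should_wake() above: both tails are free-running 32-bit counters, so
 * "have we reached the target?" is answered by casting their difference to
 * a signed int, which stays correct across wraparound as long as the two
 * counters are less than 2^31 apart. Names below are illustrative only.
 */
static inline int reached_target(unsigned int current_tail,
                                 unsigned int target_tail)
{
        return (int)(current_tail - target_tail) >= 0;
}

/*
 * e.g. current_tail = 0x00000002, target_tail = 0xfffffffe:
 * the u32 difference is 0x4, so (int)4 >= 0 -> target reached, even though
 * a plain "current_tail >= target_tail" comparison would say no.
 */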
9384static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
9385 int wake_flags, void *key)
9386{
9387 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
9388 wq);
9389
6c503150
PB
9390 /*
9391 * Cannot safely flush overflowed CQEs from here, ensure we wake up
9392 * the task, and the next invocation will do it.
9393 */
10988a0a
DY
9394 if (io_should_wake(iowq) ||
9395 test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &iowq->ctx->check_cq))
6c503150
PB
9396 return autoremove_wake_function(curr, mode, wake_flags, key);
9397 return -1;
bda52162
JA
9398}
9399
af9c1a44
JA
9400static int io_run_task_work_sig(void)
9401{
9402 if (io_run_task_work())
9403 return 1;
0b8cfa97 9404 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
792ee0f6 9405 return -ERESTARTSYS;
c5020bc8
OL
9406 if (task_sigpending(current))
9407 return -EINTR;
9408 return 0;
af9c1a44
JA
9409}
9410
eeb60b9a
PB
9411/* when this returns >0, the caller should retry */
9412static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
9413 struct io_wait_queue *iowq,
22833966 9414 ktime_t timeout)
eeb60b9a
PB
9415{
9416 int ret;
155bc950 9417 unsigned long check_cq;
eeb60b9a
PB
9418
9419 /* make sure we run task_work before checking for signals */
9420 ret = io_run_task_work_sig();
9421 if (ret || io_should_wake(iowq))
9422 return ret;
155bc950 9423 check_cq = READ_ONCE(ctx->check_cq);
eeb60b9a 9424 /* let the caller flush overflows, retry */
155bc950 9425 if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
eeb60b9a 9426 return 1;
155bc950
DY
9427 if (unlikely(check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)))
9428 return -EBADR;
22833966
JA
9429 if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
9430 return -ETIME;
9431 return 1;
eeb60b9a
PB
9432}
9433
2b188cc1
JA
9434/*
9435 * Wait until events become available, if we don't already have some. The
9436 * application must reap them itself, as they reside on the shared cq ring.
9437 */
9438static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
c73ebb68
HX
9439 const sigset_t __user *sig, size_t sigsz,
9440 struct __kernel_timespec __user *uts)
2b188cc1 9441{
90291099 9442 struct io_wait_queue iowq;
75b28aff 9443 struct io_rings *rings = ctx->rings;
22833966 9444 ktime_t timeout = KTIME_MAX;
c1d5a224 9445 int ret;
2b188cc1 9446
b41e9852 9447 do {
90f67366 9448 io_cqring_overflow_flush(ctx);
6c503150 9449 if (io_cqring_events(ctx) >= min_events)
b41e9852 9450 return 0;
4c6e277c 9451 if (!io_run_task_work())
b41e9852 9452 break;
b41e9852 9453 } while (1);
2b188cc1
JA
9454
9455 if (sig) {
9e75ad5d
AB
9456#ifdef CONFIG_COMPAT
9457 if (in_compat_syscall())
9458 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 9459 sigsz);
9e75ad5d
AB
9460 else
9461#endif
b772434b 9462 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 9463
2b188cc1
JA
9464 if (ret)
9465 return ret;
9466 }
9467
950e79dd
OL
9468 if (uts) {
9469 struct timespec64 ts;
9470
9471 if (get_timespec64(&ts, uts))
9472 return -EFAULT;
9473 timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
9474 }
9475
90291099
PB
9476 init_waitqueue_func_entry(&iowq.wq, io_wake_function);
9477 iowq.wq.private = current;
9478 INIT_LIST_HEAD(&iowq.wq.entry);
9479 iowq.ctx = ctx;
bda52162 9480 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
5fd46178 9481 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
90291099 9482
c826bd7a 9483 trace_io_uring_cqring_wait(ctx, min_events);
bda52162 9484 do {
ca0a2651 9485 /* if we can't even flush overflow, don't wait for more */
90f67366 9486 if (!io_cqring_overflow_flush(ctx)) {
ca0a2651
JA
9487 ret = -EBUSY;
9488 break;
9489 }
311997b3 9490 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
bda52162 9491 TASK_INTERRUPTIBLE);
22833966 9492 ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
ca0a2651 9493 cond_resched();
eeb60b9a 9494 } while (ret > 0);
bda52162 9495
b4f20bb4 9496 finish_wait(&ctx->cq_wait, &iowq.wq);
b7db41c9 9497 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 9498
75b28aff 9499 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
9500}
9501
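/*
 * Illustrative userspace sketch (not part of this file): io_cqring_wait()
 * above backs waits with a minimum event count, an optional signal mask and
 * an optional timeout. With liburing that maps to io_uring_wait_cqes();
 * assumes liburing, error handling elided.
 */
#include <liburing.h>

static int wait_for_events(struct io_uring *ring, unsigned min_events)
{
        struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 500000000 };
        struct io_uring_cqe *cqe;
        unsigned head, seen = 0;
        int ret;

        /* returns 0 once min_events completions are available, or -ETIME */
        ret = io_uring_wait_cqes(ring, &cqe, min_events, &ts, NULL);
        if (ret)
                return ret;

        io_uring_for_each_cqe(ring, head, cqe) {
                /* consume cqe->user_data / cqe->res here */
                seen++;
        }
        io_uring_cq_advance(ring, seen);
        return 0;
}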
9123c8ff 9502static void io_free_page_table(void **table, size_t size)
05f3fb3c 9503{
9123c8ff 9504 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
05f3fb3c 9505
846a4ef2 9506 for (i = 0; i < nr_tables; i++)
9123c8ff
PB
9507 kfree(table[i]);
9508 kfree(table);
9509}
9510
c072481d 9511static __cold void **io_alloc_page_table(size_t size)
9123c8ff
PB
9512{
9513 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
9514 size_t init_size = size;
9515 void **table;
9516
0bea96f5 9517 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
9123c8ff
PB
9518 if (!table)
9519 return NULL;
9520
9521 for (i = 0; i < nr_tables; i++) {
27f6b318 9522 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
9123c8ff 9523
0bea96f5 9524 table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
9123c8ff
PB
9525 if (!table[i]) {
9526 io_free_page_table(table, init_size);
9527 return NULL;
9528 }
9529 size -= this_size;
9530 }
9531 return table;
05f3fb3c
JA
9532}
9533
28a9fe25 9534static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
1642b445 9535{
28a9fe25
PB
9536 percpu_ref_exit(&ref_node->refs);
9537 kfree(ref_node);
1642b445
PB
9538}
9539
c072481d 9540static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref)
b9bd2bea
PB
9541{
9542 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
9543 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
9544 unsigned long flags;
9545 bool first_add = false;
b36a2050 9546 unsigned long delay = HZ;
b9bd2bea
PB
9547
9548 spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
9549 node->done = true;
9550
b36a2050
DY
9551 /* if we are mid-quiesce then do not delay */
9552 if (node->rsrc_data->quiesce)
9553 delay = 0;
9554
b9bd2bea
PB
9555 while (!list_empty(&ctx->rsrc_ref_list)) {
9556 node = list_first_entry(&ctx->rsrc_ref_list,
9557 struct io_rsrc_node, node);
9558 /* recycle ref nodes in order */
9559 if (!node->done)
9560 break;
9561 list_del(&node->node);
9562 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
9563 }
9564 spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
9565
9566 if (first_add)
b36a2050 9567 mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
b9bd2bea
PB
9568}
9569
f6133fbd 9570static struct io_rsrc_node *io_rsrc_node_alloc(void)
b9bd2bea
PB
9571{
9572 struct io_rsrc_node *ref_node;
9573
9574 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
9575 if (!ref_node)
9576 return NULL;
9577
9578 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
9579 0, GFP_KERNEL)) {
9580 kfree(ref_node);
9581 return NULL;
9582 }
9583 INIT_LIST_HEAD(&ref_node->node);
9584 INIT_LIST_HEAD(&ref_node->rsrc_list);
9585 ref_node->done = false;
9586 return ref_node;
9587}
9588
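/*
 * Side note with a minimal, self-contained sketch of the percpu_ref pattern
 * used by the rsrc nodes above: initialise with a release callback, take and
 * drop references cheaply, and percpu_ref_kill() switches to atomic mode so
 * the callback fires once the last reference is gone. The my_obj names are
 * illustrative only.
 */
#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_obj {
        struct percpu_ref refs;
};

static void my_obj_release(struct percpu_ref *ref)
{
        struct my_obj *obj = container_of(ref, struct my_obj, refs);

        percpu_ref_exit(&obj->refs);
        kfree(obj);
}

static struct my_obj *my_obj_alloc(void)
{
        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        if (percpu_ref_init(&obj->refs, my_obj_release, 0, GFP_KERNEL)) {
                kfree(obj);
                return NULL;
        }
        return obj;
}

/* usage: percpu_ref_get(&obj->refs); ... percpu_ref_put(&obj->refs); */
/* teardown: percpu_ref_kill(&obj->refs); release runs when the count hits 0 */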
a7f0ed5a
PB
9589static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
9590 struct io_rsrc_data *data_to_kill)
ab409402 9591 __must_hold(&ctx->uring_lock)
6b06314c 9592{
a7f0ed5a
PB
9593 WARN_ON_ONCE(!ctx->rsrc_backup_node);
9594 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
6b06314c 9595
ab409402
PB
9596 io_rsrc_refs_drop(ctx);
9597
a7f0ed5a
PB
9598 if (data_to_kill) {
9599 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
82fbcfa9 9600
a7f0ed5a 9601 rsrc_node->rsrc_data = data_to_kill;
4956b9ea 9602 spin_lock_irq(&ctx->rsrc_ref_lock);
a7f0ed5a 9603 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
4956b9ea 9604 spin_unlock_irq(&ctx->rsrc_ref_lock);
82fbcfa9 9605
3e942498 9606 atomic_inc(&data_to_kill->refs);
a7f0ed5a
PB
9607 percpu_ref_kill(&rsrc_node->refs);
9608 ctx->rsrc_node = NULL;
9609 }
6b06314c 9610
a7f0ed5a
PB
9611 if (!ctx->rsrc_node) {
9612 ctx->rsrc_node = ctx->rsrc_backup_node;
9613 ctx->rsrc_backup_node = NULL;
9614 }
8bad28d8
HX
9615}
9616
a7f0ed5a 9617static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
8dd03afe
PB
9618{
9619 if (ctx->rsrc_backup_node)
9620 return 0;
f6133fbd 9621 ctx->rsrc_backup_node = io_rsrc_node_alloc();
8dd03afe 9622 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
8bad28d8
HX
9623}
9624
c072481d
PB
9625static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
9626 struct io_ring_ctx *ctx)
8bad28d8
HX
9627{
9628 int ret;
05589553 9629
215c3902 9630 /* As we may drop ->uring_lock, another task may have started quiesce */
8bad28d8
HX
9631 if (data->quiesce)
9632 return -ENXIO;
05589553 9633
8bad28d8 9634 data->quiesce = true;
1ffc5422 9635 do {
a7f0ed5a 9636 ret = io_rsrc_node_switch_start(ctx);
8dd03afe 9637 if (ret)
f2303b1f 9638 break;
a7f0ed5a 9639 io_rsrc_node_switch(ctx, data);
f2303b1f 9640
3e942498
PB
9641 /* kill initial ref, already quiesced if zero */
9642 if (atomic_dec_and_test(&data->refs))
9643 break;
c018db4a 9644 mutex_unlock(&ctx->uring_lock);
8bad28d8 9645 flush_delayed_work(&ctx->rsrc_put_work);
1ffc5422 9646 ret = wait_for_completion_interruptible(&data->done);
c018db4a
JA
9647 if (!ret) {
9648 mutex_lock(&ctx->uring_lock);
80912cef
DY
9649 if (atomic_read(&data->refs) > 0) {
9650 /*
9651 * it has been revived by another thread while
9652 * we were unlocked
9653 */
9654 mutex_unlock(&ctx->uring_lock);
9655 } else {
9656 break;
9657 }
c018db4a 9658 }
8bad28d8 9659
3e942498
PB
9660 atomic_inc(&data->refs);
9661 /* wait for all work that may be completing data->done */
9662 flush_delayed_work(&ctx->rsrc_put_work);
cb5e1b81 9663 reinit_completion(&data->done);
8dd03afe 9664
1ffc5422 9665 ret = io_run_task_work_sig();
8bad28d8 9666 mutex_lock(&ctx->uring_lock);
f2303b1f 9667 } while (ret >= 0);
8bad28d8 9668 data->quiesce = false;
05f3fb3c 9669
8bad28d8 9670 return ret;
d7954b2b
BM
9671}
9672
2d091d62
PB
9673static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
9674{
9675 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
9676 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
9677
9678 return &data->tags[table_idx][off];
9679}
9680
44b31f2f 9681static void io_rsrc_data_free(struct io_rsrc_data *data)
1ad555c6 9682{
2d091d62
PB
9683 size_t size = data->nr * sizeof(data->tags[0][0]);
9684
9685 if (data->tags)
9686 io_free_page_table((void **)data->tags, size);
44b31f2f
PB
9687 kfree(data);
9688}
9689
c072481d
PB
9690static __cold int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
9691 u64 __user *utags, unsigned nr,
9692 struct io_rsrc_data **pdata)
1ad555c6 9693{
b895c9a6 9694 struct io_rsrc_data *data;
2d091d62 9695 int ret = -ENOMEM;
d878c816 9696 unsigned i;
1ad555c6
BM
9697
9698 data = kzalloc(sizeof(*data), GFP_KERNEL);
9699 if (!data)
d878c816 9700 return -ENOMEM;
2d091d62 9701 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
b60c8dce 9702 if (!data->tags) {
1ad555c6 9703 kfree(data);
d878c816
PB
9704 return -ENOMEM;
9705 }
2d091d62
PB
9706
9707 data->nr = nr;
9708 data->ctx = ctx;
9709 data->do_put = do_put;
d878c816 9710 if (utags) {
2d091d62 9711 ret = -EFAULT;
d878c816 9712 for (i = 0; i < nr; i++) {
fdd1dc31
CIK
9713 u64 *tag_slot = io_get_tag_slot(data, i);
9714
9715 if (copy_from_user(tag_slot, &utags[i],
9716 sizeof(*tag_slot)))
2d091d62 9717 goto fail;
d878c816 9718 }
1ad555c6 9719 }
b60c8dce 9720
3e942498 9721 atomic_set(&data->refs, 1);
1ad555c6 9722 init_completion(&data->done);
d878c816
PB
9723 *pdata = data;
9724 return 0;
2d091d62
PB
9725fail:
9726 io_rsrc_data_free(data);
9727 return ret;
1ad555c6
BM
9728}
9729
9123c8ff
PB
9730static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
9731{
0bea96f5
PB
9732 table->files = kvcalloc(nr_files, sizeof(table->files[0]),
9733 GFP_KERNEL_ACCOUNT);
d78bd8ad
JA
9734 if (unlikely(!table->files))
9735 return false;
9736
9737 table->bitmap = bitmap_zalloc(nr_files, GFP_KERNEL_ACCOUNT);
9738 if (unlikely(!table->bitmap)) {
9739 kvfree(table->files);
9740 return false;
9741 }
9742
9743 return true;
9123c8ff
PB
9744}
9745
042b0d85 9746static void io_free_file_tables(struct io_file_table *table)
9123c8ff 9747{
042b0d85 9748 kvfree(table->files);
d78bd8ad 9749 bitmap_free(table->bitmap);
9123c8ff 9750 table->files = NULL;
d78bd8ad
JA
9751 table->bitmap = NULL;
9752}
9753
9754static inline void io_file_bitmap_set(struct io_file_table *table, int bit)
9755{
9756 WARN_ON_ONCE(test_bit(bit, table->bitmap));
9757 __set_bit(bit, table->bitmap);
4278a0de 9758 table->alloc_hint = bit + 1;
d78bd8ad
JA
9759}
9760
9761static inline void io_file_bitmap_clear(struct io_file_table *table, int bit)
9762{
9763 __clear_bit(bit, table->bitmap);
b70b8e33 9764 table->alloc_hint = bit;
9123c8ff
PB
9765}
9766
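/*
 * Side note: io_file_bitmap_set()/io_file_bitmap_clear() above maintain an
 * allocation hint alongside the bitmap. A minimal sketch of how such a hint
 * is typically consumed (the actual allocator lives elsewhere in this file;
 * ex_bitmap_find_free below is illustrative only):
 */
#include <linux/bitmap.h>
#include <linux/errno.h>

static int ex_bitmap_find_free(unsigned long *bitmap, unsigned int nbits,
                               unsigned int hint)
{
        unsigned int bit;

        /* scan from the hint first, then wrap around to the start */
        bit = find_next_zero_bit(bitmap, nbits, hint);
        if (bit == nbits)
                bit = find_next_zero_bit(bitmap, nbits, 0);
        return bit == nbits ? -ENFILE : (int)bit;
}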
fff4db76 9767static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
1ad555c6 9768{
69cc1b6f 9769#if !defined(IO_URING_SCM_ALL)
1f59bc0f
PB
9770 int i;
9771
9772 for (i = 0; i < ctx->nr_user_files; i++) {
9773 struct file *file = io_file_from_index(ctx, i);
9774
5e45690a
JA
9775 if (!file)
9776 continue;
9777 if (io_fixed_file_slot(&ctx->file_table, i)->file_ptr & FFS_SCM)
1f59bc0f 9778 continue;
d78bd8ad 9779 io_file_bitmap_clear(&ctx->file_table, i);
1f59bc0f
PB
9780 fput(file);
9781 }
5e45690a 9782#endif
1f59bc0f 9783
fff4db76
PB
9784#if defined(CONFIG_UNIX)
9785 if (ctx->ring_sock) {
9786 struct sock *sock = ctx->ring_sock->sk;
9787 struct sk_buff *skb;
9788
9789 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
9790 kfree_skb(skb);
9791 }
fff4db76 9792#endif
042b0d85 9793 io_free_file_tables(&ctx->file_table);
44b31f2f 9794 io_rsrc_data_free(ctx->file_data);
fff4db76
PB
9795 ctx->file_data = NULL;
9796 ctx->nr_user_files = 0;
1ad555c6
BM
9797}
9798
d7954b2b
BM
9799static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
9800{
b0380bf6 9801 unsigned nr = ctx->nr_user_files;
d7954b2b
BM
9802 int ret;
9803
08480400 9804 if (!ctx->file_data)
d7954b2b 9805 return -ENXIO;
b0380bf6
PB
9806
9807 /*
9808 * Quiesce may unlock ->uring_lock; while it's not held, prevent
9809 * new requests from using the table.
9810 */
9811 ctx->nr_user_files = 0;
08480400 9812 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
b0380bf6 9813 ctx->nr_user_files = nr;
08480400
PB
9814 if (!ret)
9815 __io_sqe_files_unregister(ctx);
9816 return ret;
6b06314c
JA
9817}
9818
37d1e2e3 9819static void io_sq_thread_unpark(struct io_sq_data *sqd)
09a6f4ef 9820 __releases(&sqd->lock)
37d1e2e3 9821{
521d6a73
PB
9822 WARN_ON_ONCE(sqd->thread == current);
9823
9e138a48
PB
9824 /*
9825 * Do the dance, but not a conditional clear_bit(), because it'd race with
9826 * other threads incrementing park_pending and setting the bit.
9827 */
37d1e2e3 9828 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
9e138a48
PB
9829 if (atomic_dec_return(&sqd->park_pending))
9830 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
09a6f4ef 9831 mutex_unlock(&sqd->lock);
37d1e2e3
JA
9832}
9833
86e0d676 9834static void io_sq_thread_park(struct io_sq_data *sqd)
09a6f4ef 9835 __acquires(&sqd->lock)
37d1e2e3 9836{
521d6a73
PB
9837 WARN_ON_ONCE(sqd->thread == current);
9838
9e138a48 9839 atomic_inc(&sqd->park_pending);
86e0d676 9840 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
09a6f4ef 9841 mutex_lock(&sqd->lock);
05962f95 9842 if (sqd->thread)
86e0d676 9843 wake_up_process(sqd->thread);
37d1e2e3
JA
9844}
9845
9846static void io_sq_thread_stop(struct io_sq_data *sqd)
9847{
521d6a73 9848 WARN_ON_ONCE(sqd->thread == current);
88885f66 9849 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
521d6a73 9850
05962f95 9851 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
88885f66 9852 mutex_lock(&sqd->lock);
e8f98f24
JA
9853 if (sqd->thread)
9854 wake_up_process(sqd->thread);
09a6f4ef 9855 mutex_unlock(&sqd->lock);
05962f95 9856 wait_for_completion(&sqd->exited);
37d1e2e3
JA
9857}
9858
534ca6d6 9859static void io_put_sq_data(struct io_sq_data *sqd)
6c271ce2 9860{
534ca6d6 9861 if (refcount_dec_and_test(&sqd->refs)) {
9e138a48
PB
9862 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
9863
37d1e2e3
JA
9864 io_sq_thread_stop(sqd);
9865 kfree(sqd);
9866 }
9867}
9868
9869static void io_sq_thread_finish(struct io_ring_ctx *ctx)
9870{
9871 struct io_sq_data *sqd = ctx->sq_data;
9872
9873 if (sqd) {
05962f95 9874 io_sq_thread_park(sqd);
521d6a73 9875 list_del_init(&ctx->sqd_list);
37d1e2e3 9876 io_sqd_update_thread_idle(sqd);
05962f95 9877 io_sq_thread_unpark(sqd);
37d1e2e3
JA
9878
9879 io_put_sq_data(sqd);
9880 ctx->sq_data = NULL;
534ca6d6
JA
9881 }
9882}
9883
aa06165d
JA
9884static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
9885{
9886 struct io_ring_ctx *ctx_attach;
9887 struct io_sq_data *sqd;
9888 struct fd f;
9889
9890 f = fdget(p->wq_fd);
9891 if (!f.file)
9892 return ERR_PTR(-ENXIO);
9893 if (f.file->f_op != &io_uring_fops) {
9894 fdput(f);
9895 return ERR_PTR(-EINVAL);
9896 }
9897
9898 ctx_attach = f.file->private_data;
9899 sqd = ctx_attach->sq_data;
9900 if (!sqd) {
9901 fdput(f);
9902 return ERR_PTR(-EINVAL);
9903 }
5c2469e0
JA
9904 if (sqd->task_tgid != current->tgid) {
9905 fdput(f);
9906 return ERR_PTR(-EPERM);
9907 }
aa06165d
JA
9908
9909 refcount_inc(&sqd->refs);
9910 fdput(f);
9911 return sqd;
9912}
9913
26984fbf
PB
9914static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
9915 bool *attached)
534ca6d6
JA
9916{
9917 struct io_sq_data *sqd;
9918
26984fbf 9919 *attached = false;
5c2469e0
JA
9920 if (p->flags & IORING_SETUP_ATTACH_WQ) {
9921 sqd = io_attach_sq_data(p);
26984fbf
PB
9922 if (!IS_ERR(sqd)) {
9923 *attached = true;
5c2469e0 9924 return sqd;
26984fbf 9925 }
5c2469e0
JA
9926 /* fall through for EPERM case, setup new sqd/task */
9927 if (PTR_ERR(sqd) != -EPERM)
9928 return sqd;
9929 }
aa06165d 9930
534ca6d6
JA
9931 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
9932 if (!sqd)
9933 return ERR_PTR(-ENOMEM);
9934
9e138a48 9935 atomic_set(&sqd->park_pending, 0);
534ca6d6 9936 refcount_set(&sqd->refs, 1);
69fb2131 9937 INIT_LIST_HEAD(&sqd->ctx_list);
09a6f4ef 9938 mutex_init(&sqd->lock);
534ca6d6 9939 init_waitqueue_head(&sqd->wait);
37d1e2e3 9940 init_completion(&sqd->exited);
534ca6d6
JA
9941 return sqd;
9942}
9943
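/*
 * Illustrative userspace sketch (not part of this file):
 * io_attach_sq_data()/io_get_sq_data() above implement the kernel side of
 * IORING_SETUP_ATTACH_WQ, which lets a second ring share the SQPOLL thread
 * of an existing ring identified by params->wq_fd. Assumes liburing; error
 * handling elided.
 */
#include <liburing.h>
#include <string.h>

static int setup_shared_sqpoll(struct io_uring *first, struct io_uring *second)
{
        struct io_uring_params p;

        memset(&p, 0, sizeof(p));
        p.flags = IORING_SETUP_SQPOLL;
        if (io_uring_queue_init_params(64, first, &p))
                return -1;

        memset(&p, 0, sizeof(p));
        p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_ATTACH_WQ;
        p.wq_fd = first->ring_fd;       /* share the first ring's SQ thread */
        return io_uring_queue_init_params(64, second, &p);
}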
6b06314c
JA
9944/*
9945 * Ensure the UNIX gc is aware of our file set, so we are certain that
9946 * the io_uring can be safely unregistered on process exit, even if we have
1f59bc0f
PB
9947 * loops in the file referencing. We account only files that can hold other
9948 * files because otherwise they can't form a loop and so are not interesting
9949 * for GC.
6b06314c 9950 */
8b3171bd 9951static int io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
6b06314c 9952{
73b25d3b 9953#if defined(CONFIG_UNIX)
6b06314c 9954 struct sock *sk = ctx->ring_sock->sk;
73b25d3b 9955 struct sk_buff_head *head = &sk->sk_receive_queue;
6b06314c
JA
9956 struct scm_fp_list *fpl;
9957 struct sk_buff *skb;
6b06314c 9958
73b25d3b
PB
9959 if (likely(!io_file_need_scm(file)))
9960 return 0;
6b06314c 9961
73b25d3b
PB
9962 /*
9963 * See if we can merge this file into an existing skb SCM_RIGHTS
9964 * file set. If there's no room, fall back to allocating a new skb
9965 * and filling it in.
9966 */
9967 spin_lock_irq(&head->lock);
9968 skb = skb_peek(head);
9969 if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
9970 __skb_unlink(skb, head);
9971 else
9972 skb = NULL;
9973 spin_unlock_irq(&head->lock);
6b06314c 9974
6b06314c 9975 if (!skb) {
73b25d3b
PB
9976 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
9977 if (!fpl)
9978 return -ENOMEM;
6b06314c 9979
73b25d3b
PB
9980 skb = alloc_skb(0, GFP_KERNEL);
9981 if (!skb) {
9982 kfree(fpl);
9983 return -ENOMEM;
9984 }
6b06314c 9985
73b25d3b
PB
9986 fpl->user = get_uid(current_user());
9987 fpl->max = SCM_MAX_FD;
9988 fpl->count = 0;
65e19f54 9989
73b25d3b
PB
9990 UNIXCB(skb).fp = fpl;
9991 skb->sk = sk;
9992 skb->destructor = unix_destruct_scm;
9993 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
6b06314c
JA
9994 }
9995
73b25d3b
PB
9996 fpl = UNIXCB(skb).fp;
9997 fpl->fp[fpl->count++] = get_file(file);
9998 unix_inflight(fpl->user, file);
9999 skb_queue_head(head, skb);
dca58c6a 10000 fput(file);
73b25d3b 10001#endif
6b06314c
JA
10002 return 0;
10003}
6b06314c 10004
47e90392 10005static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
05f3fb3c 10006{
50238531 10007 struct file *file = prsrc->file;
05f3fb3c
JA
10008#if defined(CONFIG_UNIX)
10009 struct sock *sock = ctx->ring_sock->sk;
10010 struct sk_buff_head list, *head = &sock->sk_receive_queue;
10011 struct sk_buff *skb;
10012 int i;
10013
1f59bc0f
PB
10014 if (!io_file_need_scm(file)) {
10015 fput(file);
10016 return;
10017 }
10018
05f3fb3c
JA
10019 __skb_queue_head_init(&list);
10020
10021 /*
10022 * Find the skb that holds this file in its SCM_RIGHTS. When found,
10023 * remove this entry and rearrange the file array.
10024 */
10025 skb = skb_dequeue(head);
10026 while (skb) {
10027 struct scm_fp_list *fp;
10028
10029 fp = UNIXCB(skb).fp;
10030 for (i = 0; i < fp->count; i++) {
10031 int left;
10032
10033 if (fp->fp[i] != file)
10034 continue;
10035
10036 unix_notinflight(fp->user, fp->fp[i]);
10037 left = fp->count - 1 - i;
10038 if (left) {
10039 memmove(&fp->fp[i], &fp->fp[i + 1],
10040 left * sizeof(struct file *));
10041 }
10042 fp->count--;
10043 if (!fp->count) {
10044 kfree_skb(skb);
10045 skb = NULL;
10046 } else {
10047 __skb_queue_tail(&list, skb);
10048 }
10049 fput(file);
10050 file = NULL;
10051 break;
10052 }
10053
10054 if (!file)
10055 break;
10056
10057 __skb_queue_tail(&list, skb);
10058
10059 skb = skb_dequeue(head);
10060 }
10061
10062 if (skb_peek(&list)) {
10063 spin_lock_irq(&head->lock);
10064 while ((skb = __skb_dequeue(&list)) != NULL)
10065 __skb_queue_tail(head, skb);
10066 spin_unlock_irq(&head->lock);
10067 }
10068#else
10069 fput(file);
10070#endif
10071}
10072
b895c9a6 10073static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
65e19f54 10074{
b895c9a6 10075 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
269bbe5f
BM
10076 struct io_ring_ctx *ctx = rsrc_data->ctx;
10077 struct io_rsrc_put *prsrc, *tmp;
05589553 10078
269bbe5f
BM
10079 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
10080 list_del(&prsrc->list);
b60c8dce
PB
10081
10082 if (prsrc->tag) {
f8929630
PB
10083 if (ctx->flags & IORING_SETUP_IOPOLL)
10084 mutex_lock(&ctx->uring_lock);
b60c8dce 10085
79ebeaee 10086 spin_lock(&ctx->completion_lock);
913a571a 10087 io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
b60c8dce 10088 io_commit_cqring(ctx);
79ebeaee 10089 spin_unlock(&ctx->completion_lock);
b60c8dce 10090 io_cqring_ev_posted(ctx);
f8929630
PB
10091
10092 if (ctx->flags & IORING_SETUP_IOPOLL)
10093 mutex_unlock(&ctx->uring_lock);
b60c8dce
PB
10094 }
10095
40ae0ff7 10096 rsrc_data->do_put(ctx, prsrc);
269bbe5f 10097 kfree(prsrc);
65e19f54 10098 }
05589553 10099
28a9fe25 10100 io_rsrc_node_destroy(ref_node);
3e942498
PB
10101 if (atomic_dec_and_test(&rsrc_data->refs))
10102 complete(&rsrc_data->done);
2faf852d 10103}
65e19f54 10104
269bbe5f 10105static void io_rsrc_put_work(struct work_struct *work)
4a38aed2
JA
10106{
10107 struct io_ring_ctx *ctx;
10108 struct llist_node *node;
10109
269bbe5f
BM
10110 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
10111 node = llist_del_all(&ctx->rsrc_put_llist);
4a38aed2
JA
10112
10113 while (node) {
b895c9a6 10114 struct io_rsrc_node *ref_node;
4a38aed2
JA
10115 struct llist_node *next = node->next;
10116
b895c9a6 10117 ref_node = llist_entry(node, struct io_rsrc_node, llist);
269bbe5f 10118 __io_rsrc_put_work(ref_node);
4a38aed2
JA
10119 node = next;
10120 }
10121}
10122
6b06314c 10123static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
792e3582 10124 unsigned nr_args, u64 __user *tags)
6b06314c
JA
10125{
10126 __s32 __user *fds = (__s32 __user *) arg;
05f3fb3c 10127 struct file *file;
f3baed39 10128 int fd, ret;
846a4ef2 10129 unsigned i;
6b06314c 10130
05f3fb3c 10131 if (ctx->file_data)
6b06314c
JA
10132 return -EBUSY;
10133 if (!nr_args)
10134 return -EINVAL;
10135 if (nr_args > IORING_MAX_FIXED_FILES)
10136 return -EMFILE;
3a1b8a4e
PB
10137 if (nr_args > rlimit(RLIMIT_NOFILE))
10138 return -EMFILE;
a7f0ed5a 10139 ret = io_rsrc_node_switch_start(ctx);
f3baed39
PB
10140 if (ret)
10141 return ret;
d878c816
PB
10142 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
10143 &ctx->file_data);
10144 if (ret)
10145 return ret;
6b06314c 10146
a03a2a20
PB
10147 if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
10148 io_rsrc_data_free(ctx->file_data);
10149 ctx->file_data = NULL;
10150 return -ENOMEM;
10151 }
65e19f54 10152
08a45173 10153 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
a03a2a20
PB
10154 struct io_fixed_file *file_slot;
10155
a8da73a3 10156 if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
600cf3f8 10157 ret = -EFAULT;
a03a2a20 10158 goto fail;
600cf3f8 10159 }
08a45173 10160 /* allow sparse sets */
a8da73a3 10161 if (!fds || fd == -1) {
792e3582 10162 ret = -EINVAL;
2d091d62 10163 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
a03a2a20 10164 goto fail;
08a45173 10165 continue;
792e3582 10166 }
6b06314c 10167
05f3fb3c 10168 file = fget(fd);
6b06314c 10169 ret = -EBADF;
792e3582 10170 if (unlikely(!file))
a03a2a20 10171 goto fail;
05f3fb3c 10172
6b06314c
JA
10173 /*
10174 * Don't allow io_uring instances to be registered. If UNIX
10175 * isn't enabled, then this causes a reference cycle and this
10176 * instance can never get freed. If UNIX is enabled we'll
10177 * handle it just fine, but there's still no point in allowing
10178 * a ring fd as it doesn't support regular read/write anyway.
10179 */
05f3fb3c
JA
10180 if (file->f_op == &io_uring_fops) {
10181 fput(file);
a03a2a20 10182 goto fail;
6b06314c 10183 }
8b3171bd 10184 ret = io_scm_file_account(ctx, file);
a03a2a20 10185 if (ret) {
600cf3f8 10186 fput(file);
a03a2a20 10187 goto fail;
c3a31e60 10188 }
e390510a
PB
10189 file_slot = io_fixed_file_slot(&ctx->file_table, i);
10190 io_fixed_file_set(file_slot, file);
d78bd8ad 10191 io_file_bitmap_set(&ctx->file_table, i);
c3a31e60
JA
10192 }
10193
a7f0ed5a 10194 io_rsrc_node_switch(ctx, NULL);
c3a31e60 10195 return 0;
a03a2a20
PB
10196fail:
10197 __io_sqe_files_unregister(ctx);
6b06314c 10198 return ret;
c3a31e60
JA
10199}
10200
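/*
 * Illustrative userspace sketch (not part of this file) of what
 * io_sqe_files_register() above services: register a fixed file table once,
 * then address files by table index with IOSQE_FIXED_FILE instead of
 * passing real fds on every submission. Assumes liburing; error handling
 * elided.
 */
#include <liburing.h>

static int read_via_fixed_file(struct io_uring *ring, int fd,
                               void *buf, unsigned len)
{
        struct io_uring_sqe *sqe;
        int ret;

        /* slot 0 of the fixed table now refers to fd */
        ret = io_uring_register_files(ring, &fd, 1);
        if (ret)
                return ret;

        sqe = io_uring_get_sqe(ring);
        /* the "fd" argument is the fixed-table index, not a real fd */
        io_uring_prep_read(sqe, 0, buf, len, 0);
        sqe->flags |= IOSQE_FIXED_FILE;

        return io_uring_submit(ring);
}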
9c7b0ba8
PB
10201static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
10202 struct io_rsrc_node *node, void *rsrc)
10203{
8f0a2480 10204 u64 *tag_slot = io_get_tag_slot(data, idx);
9c7b0ba8
PB
10205 struct io_rsrc_put *prsrc;
10206
10207 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
10208 if (!prsrc)
10209 return -ENOMEM;
10210
8f0a2480
PB
10211 prsrc->tag = *tag_slot;
10212 *tag_slot = 0;
9c7b0ba8
PB
10213 prsrc->rsrc = rsrc;
10214 list_add(&prsrc->list, &node->rsrc_list);
10215 return 0;
10216}
10217
b9445598
PB
10218static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
10219 unsigned int issue_flags, u32 slot_index)
61c1b44a 10220 __must_hold(&req->ctx->uring_lock)
b9445598
PB
10221{
10222 struct io_ring_ctx *ctx = req->ctx;
9c7b0ba8 10223 bool needs_switch = false;
b9445598 10224 struct io_fixed_file *file_slot;
61c1b44a 10225 int ret;
b9445598 10226
b9445598 10227 if (file->f_op == &io_uring_fops)
61c1b44a 10228 return -EBADF;
b9445598 10229 if (!ctx->file_data)
61c1b44a 10230 return -ENXIO;
b9445598 10231 if (slot_index >= ctx->nr_user_files)
61c1b44a 10232 return -EINVAL;
b9445598
PB
10233
10234 slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
10235 file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
9c7b0ba8
PB
10236
10237 if (file_slot->file_ptr) {
10238 struct file *old_file;
10239
10240 ret = io_rsrc_node_switch_start(ctx);
10241 if (ret)
10242 goto err;
10243
10244 old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
10245 ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
10246 ctx->rsrc_node, old_file);
10247 if (ret)
10248 goto err;
10249 file_slot->file_ptr = 0;
d78bd8ad 10250 io_file_bitmap_clear(&ctx->file_table, slot_index);
9c7b0ba8
PB
10251 needs_switch = true;
10252 }
b9445598 10253
8b3171bd 10254 ret = io_scm_file_account(ctx, file);
e390510a
PB
10255 if (!ret) {
10256 *io_get_tag_slot(ctx->file_data, slot_index) = 0;
10257 io_fixed_file_set(file_slot, file);
d78bd8ad 10258 io_file_bitmap_set(&ctx->file_table, slot_index);
b9445598 10259 }
b9445598 10260err:
9c7b0ba8
PB
10261 if (needs_switch)
10262 io_rsrc_node_switch(ctx, ctx->file_data);
b9445598
PB
10263 if (ret)
10264 fput(file);
10265 return ret;
10266}
10267
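/*
 * Illustrative userspace sketch (not part of this file):
 * io_install_fixed_file() above is what services "direct" opens that place
 * the resulting file straight into a fixed-table slot. Assumes liburing
 * with io_uring_prep_openat_direct() available, and that a fixed file table
 * (possibly sparse) has already been registered; error handling elided.
 */
#include <liburing.h>
#include <fcntl.h>

static void open_into_fixed_slot(struct io_uring *ring, unsigned slot)
{
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        /* opens the file and installs it into fixed-table slot 'slot' */
        io_uring_prep_openat_direct(sqe, AT_FDCWD, "/etc/hostname",
                                    O_RDONLY, 0, slot);
        io_uring_submit(ring);
        /* later SQEs can use IOSQE_FIXED_FILE with fd == slot */
}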
a7c41b46
XW
10268static int __io_close_fixed(struct io_kiocb *req, unsigned int issue_flags,
10269 unsigned int offset)
7df778be 10270{
7df778be
PB
10271 struct io_ring_ctx *ctx = req->ctx;
10272 struct io_fixed_file *file_slot;
10273 struct file *file;
4cdd158b 10274 int ret;
7df778be 10275
f8929630 10276 io_ring_submit_lock(ctx, issue_flags);
7df778be
PB
10277 ret = -ENXIO;
10278 if (unlikely(!ctx->file_data))
10279 goto out;
10280 ret = -EINVAL;
10281 if (offset >= ctx->nr_user_files)
10282 goto out;
10283 ret = io_rsrc_node_switch_start(ctx);
10284 if (ret)
10285 goto out;
10286
4cdd158b
PB
10287 offset = array_index_nospec(offset, ctx->nr_user_files);
10288 file_slot = io_fixed_file_slot(&ctx->file_table, offset);
7df778be
PB
10289 ret = -EBADF;
10290 if (!file_slot->file_ptr)
10291 goto out;
10292
10293 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
10294 ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
10295 if (ret)
10296 goto out;
10297
10298 file_slot->file_ptr = 0;
d78bd8ad 10299 io_file_bitmap_clear(&ctx->file_table, offset);
7df778be
PB
10300 io_rsrc_node_switch(ctx, ctx->file_data);
10301 ret = 0;
10302out:
f8929630 10303 io_ring_submit_unlock(ctx, issue_flags);
7df778be
PB
10304 return ret;
10305}
10306
a7c41b46
XW
10307static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
10308{
10309 return __io_close_fixed(req, issue_flags, req->close.file_slot - 1);
10310}
10311
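/*
 * IORING_REGISTER_FILES_UPDATE(2): walk the user-supplied fd array, where
 * -1 removes an entry and IORING_REGISTER_FILES_SKIP leaves it untouched;
 * returns the number of entries processed before any error.
 */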
05f3fb3c 10312static int __io_sqe_files_update(struct io_ring_ctx *ctx,
c3bdad02 10313 struct io_uring_rsrc_update2 *up,
05f3fb3c
JA
10314 unsigned nr_args)
10315{
c3bdad02 10316 u64 __user *tags = u64_to_user_ptr(up->tags);
98f0b3b4 10317 __s32 __user *fds = u64_to_user_ptr(up->data);
b895c9a6 10318 struct io_rsrc_data *data = ctx->file_data;
a04b0ac0
PB
10319 struct io_fixed_file *file_slot;
10320 struct file *file;
98f0b3b4
PB
10321 int fd, i, err = 0;
10322 unsigned int done;
05589553 10323 bool needs_switch = false;
c3a31e60 10324
98f0b3b4
PB
10325 if (!ctx->file_data)
10326 return -ENXIO;
10327 if (up->offset + nr_args > ctx->nr_user_files)
c3a31e60
JA
10328 return -EINVAL;
10329
67973b93 10330 for (done = 0; done < nr_args; done++) {
c3bdad02
PB
10331 u64 tag = 0;
10332
10333 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
10334 copy_from_user(&fd, &fds[done], sizeof(fd))) {
c3a31e60
JA
10335 err = -EFAULT;
10336 break;
10337 }
c3bdad02
PB
10338 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
10339 err = -EINVAL;
10340 break;
10341 }
4e0377a1 10342 if (fd == IORING_REGISTER_FILES_SKIP)
10343 continue;
10344
67973b93 10345 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
aeca241b 10346 file_slot = io_fixed_file_slot(&ctx->file_table, i);
ea64ec02 10347
a04b0ac0
PB
10348 if (file_slot->file_ptr) {
10349 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
4cdd158b 10350 err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
a5318d3c
HD
10351 if (err)
10352 break;
a04b0ac0 10353 file_slot->file_ptr = 0;
d78bd8ad 10354 io_file_bitmap_clear(&ctx->file_table, i);
05589553 10355 needs_switch = true;
c3a31e60
JA
10356 }
10357 if (fd != -1) {
c3a31e60
JA
10358 file = fget(fd);
10359 if (!file) {
10360 err = -EBADF;
10361 break;
10362 }
10363 /*
10364 * Don't allow io_uring instances to be registered. If
10365 * UNIX isn't enabled, then this causes a reference
10366 * cycle and this instance can never get freed. If UNIX
10367 * is enabled we'll handle it just fine, but there's
10368 * still no point in allowing a ring fd as it doesn't
10369 * support regular read/write anyway.
10370 */
10371 if (file->f_op == &io_uring_fops) {
10372 fput(file);
10373 err = -EBADF;
10374 break;
10375 }
8b3171bd 10376 err = io_scm_file_account(ctx, file);
f3bd9dae
YY
10377 if (err) {
10378 fput(file);
c3a31e60 10379 break;
f3bd9dae 10380 }
e390510a
PB
10381 *io_get_tag_slot(data, i) = tag;
10382 io_fixed_file_set(file_slot, file);
d78bd8ad 10383 io_file_bitmap_set(&ctx->file_table, i);
c3a31e60 10384 }
05f3fb3c
JA
10385 }
10386
a7f0ed5a
PB
10387 if (needs_switch)
10388 io_rsrc_node_switch(ctx, data);
c3a31e60
JA
10389 return done ? done : err;
10390}
05589553 10391
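/*
 * io-wq setup for this ring: the hash map is shared by all workers of the
 * ctx so hashed (e.g. buffered write) work targeting the same inode is
 * serialised, and concurrency is capped at min(sq_entries, 4 * CPUs).
 */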
685fe7fe
JA
10392static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
10393 struct task_struct *task)
24369c2e 10394{
e941894e 10395 struct io_wq_hash *hash;
24369c2e 10396 struct io_wq_data data;
24369c2e 10397 unsigned int concurrency;
24369c2e 10398
362a9e65 10399 mutex_lock(&ctx->uring_lock);
e941894e
JA
10400 hash = ctx->hash_map;
10401 if (!hash) {
10402 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
362a9e65
YY
10403 if (!hash) {
10404 mutex_unlock(&ctx->uring_lock);
e941894e 10405 return ERR_PTR(-ENOMEM);
362a9e65 10406 }
e941894e
JA
10407 refcount_set(&hash->refs, 1);
10408 init_waitqueue_head(&hash->wait);
10409 ctx->hash_map = hash;
24369c2e 10410 }
362a9e65 10411 mutex_unlock(&ctx->uring_lock);
24369c2e 10412
e941894e 10413 data.hash = hash;
685fe7fe 10414 data.task = task;
ebc11b6c 10415 data.free_work = io_wq_free_work;
f5fa38c5 10416 data.do_work = io_wq_submit_work;
24369c2e 10417
d25e3a3d
JA
10418 /* Do QD, or 4 * CPUS, whatever is smallest */
10419 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
24369c2e 10420
5aa75ed5 10421 return io_wq_create(concurrency, &data);
24369c2e
PB
10422}
10423
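/*
 * Per-task io_uring state (tctx): an xarray of per-ring nodes, the
 * registered ring fd table, an inflight request counter and the io-wq
 * instance that runs this task's punted work.
 */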
c072481d
PB
10424static __cold int io_uring_alloc_task_context(struct task_struct *task,
10425 struct io_ring_ctx *ctx)
0f212204
JA
10426{
10427 struct io_uring_task *tctx;
d8a6df10 10428 int ret;
0f212204 10429
09899b19 10430 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
0f212204
JA
10431 if (unlikely(!tctx))
10432 return -ENOMEM;
10433
e7a6c00d
JA
10434 tctx->registered_rings = kcalloc(IO_RINGFD_REG_MAX,
10435 sizeof(struct file *), GFP_KERNEL);
10436 if (unlikely(!tctx->registered_rings)) {
10437 kfree(tctx);
10438 return -ENOMEM;
10439 }
10440
d8a6df10
JA
10441 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
10442 if (unlikely(ret)) {
e7a6c00d 10443 kfree(tctx->registered_rings);
d8a6df10
JA
10444 kfree(tctx);
10445 return ret;
10446 }
10447
685fe7fe 10448 tctx->io_wq = io_init_wq_offload(ctx, task);
5aa75ed5
JA
10449 if (IS_ERR(tctx->io_wq)) {
10450 ret = PTR_ERR(tctx->io_wq);
10451 percpu_counter_destroy(&tctx->inflight);
e7a6c00d 10452 kfree(tctx->registered_rings);
5aa75ed5
JA
10453 kfree(tctx);
10454 return ret;
10455 }
10456
0f212204
JA
10457 xa_init(&tctx->xa);
10458 init_waitqueue_head(&tctx->wait);
fdaf083c 10459 atomic_set(&tctx->in_idle, 0);
9cae36a0 10460 atomic_set(&tctx->inflight_tracked, 0);
0f212204 10461 task->io_uring = tctx;
7cbf1722
JA
10462 spin_lock_init(&tctx->task_lock);
10463 INIT_WQ_LIST(&tctx->task_list);
3fe07bcd 10464 INIT_WQ_LIST(&tctx->prio_task_list);
7cbf1722 10465 init_task_work(&tctx->task_work, tctx_task_work);
0f212204
JA
10466 return 0;
10467}
10468
10469void __io_uring_free(struct task_struct *tsk)
10470{
10471 struct io_uring_task *tctx = tsk->io_uring;
10472
10473 WARN_ON_ONCE(!xa_empty(&tctx->xa));
ef8eaa4e 10474 WARN_ON_ONCE(tctx->io_wq);
09899b19 10475 WARN_ON_ONCE(tctx->cached_refs);
ef8eaa4e 10476
e7a6c00d 10477 kfree(tctx->registered_rings);
d8a6df10 10478 percpu_counter_destroy(&tctx->inflight);
0f212204
JA
10479 kfree(tctx);
10480 tsk->io_uring = NULL;
10481}
10482
c072481d
PB
10483static __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
10484 struct io_uring_params *p)
2b188cc1
JA
10485{
10486 int ret;
10487
d25e3a3d
JA
10488 /* Retain compatibility with failing for an invalid attach attempt */
10489 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
10490 IORING_SETUP_ATTACH_WQ) {
10491 struct fd f;
10492
10493 f = fdget(p->wq_fd);
10494 if (!f.file)
10495 return -ENXIO;
0cc936f7
JA
10496 if (f.file->f_op != &io_uring_fops) {
10497 fdput(f);
f2a48dd0 10498 return -EINVAL;
0cc936f7
JA
10499 }
10500 fdput(f);
d25e3a3d 10501 }
6c271ce2 10502 if (ctx->flags & IORING_SETUP_SQPOLL) {
46fe18b1 10503 struct task_struct *tsk;
534ca6d6 10504 struct io_sq_data *sqd;
26984fbf 10505 bool attached;
534ca6d6 10506
cdc1404a
PM
10507 ret = security_uring_sqpoll();
10508 if (ret)
10509 return ret;
10510
26984fbf 10511 sqd = io_get_sq_data(p, &attached);
534ca6d6
JA
10512 if (IS_ERR(sqd)) {
10513 ret = PTR_ERR(sqd);
10514 goto err;
10515 }
69fb2131 10516
7c30f36a 10517 ctx->sq_creds = get_current_cred();
534ca6d6 10518 ctx->sq_data = sqd;
917257da
JA
10519 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
10520 if (!ctx->sq_thread_idle)
10521 ctx->sq_thread_idle = HZ;
10522
78d7f6ba 10523 io_sq_thread_park(sqd);
de75a3d3
PB
10524 list_add(&ctx->sqd_list, &sqd->ctx_list);
10525 io_sqd_update_thread_idle(sqd);
26984fbf 10526 /* don't attach to a dying SQPOLL thread, would be racy */
f2a48dd0 10527 ret = (attached && !sqd->thread) ? -ENXIO : 0;
78d7f6ba
PB
10528 io_sq_thread_unpark(sqd);
10529
de75a3d3
PB
10530 if (ret < 0)
10531 goto err;
10532 if (attached)
5aa75ed5 10533 return 0;
aa06165d 10534
6c271ce2 10535 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 10536 int cpu = p->sq_thread_cpu;
6c271ce2 10537
917257da 10538 ret = -EINVAL;
f2a48dd0 10539 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
e8f98f24 10540 goto err_sqpoll;
37d1e2e3 10541 sqd->sq_cpu = cpu;
6c271ce2 10542 } else {
37d1e2e3 10543 sqd->sq_cpu = -1;
6c271ce2 10544 }
37d1e2e3
JA
10545
10546 sqd->task_pid = current->pid;
5c2469e0 10547 sqd->task_tgid = current->tgid;
46fe18b1
JA
10548 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
10549 if (IS_ERR(tsk)) {
10550 ret = PTR_ERR(tsk);
e8f98f24 10551 goto err_sqpoll;
6c271ce2 10552 }
97a73a0f 10553
46fe18b1 10554 sqd->thread = tsk;
97a73a0f 10555 ret = io_uring_alloc_task_context(tsk, ctx);
46fe18b1 10556 wake_up_new_task(tsk);
0f212204
JA
10557 if (ret)
10558 goto err;
6c271ce2
JA
10559 } else if (p->flags & IORING_SETUP_SQ_AFF) {
10560 /* Can't have SQ_AFF without SQPOLL */
10561 ret = -EINVAL;
10562 goto err;
10563 }
10564
2b188cc1 10565 return 0;
f2a48dd0
PB
10566err_sqpoll:
10567 complete(&ctx->sq_data->exited);
2b188cc1 10568err:
37d1e2e3 10569 io_sq_thread_finish(ctx);
2b188cc1
JA
10570 return ret;
10571}
10572
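/*
 * Memory accounting for pinned (registered) buffers: pages are charged
 * against the registering user's RLIMIT_MEMLOCK via a lock-free cmpxchg
 * loop, and tracked in the ->mm_account pinned_vm counter when available.
 */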
a087e2b5
BM
10573static inline void __io_unaccount_mem(struct user_struct *user,
10574 unsigned long nr_pages)
2b188cc1
JA
10575{
10576 atomic_long_sub(nr_pages, &user->locked_vm);
10577}
10578
a087e2b5
BM
10579static inline int __io_account_mem(struct user_struct *user,
10580 unsigned long nr_pages)
2b188cc1
JA
10581{
10582 unsigned long page_limit, cur_pages, new_pages;
10583
10584 /* Don't allow more pages than we can safely lock */
10585 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
10586
10587 do {
10588 cur_pages = atomic_long_read(&user->locked_vm);
10589 new_pages = cur_pages + nr_pages;
10590 if (new_pages > page_limit)
10591 return -ENOMEM;
10592 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
10593 new_pages) != cur_pages);
10594
10595 return 0;
10596}
10597
26bfa89e 10598static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
a087e2b5 10599{
62e398be 10600 if (ctx->user)
a087e2b5 10601 __io_unaccount_mem(ctx->user, nr_pages);
30975825 10602
26bfa89e
JA
10603 if (ctx->mm_account)
10604 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
a087e2b5
BM
10605}
10606
26bfa89e 10607static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
a087e2b5 10608{
30975825
BM
10609 int ret;
10610
62e398be 10611 if (ctx->user) {
30975825
BM
10612 ret = __io_account_mem(ctx->user, nr_pages);
10613 if (ret)
10614 return ret;
10615 }
10616
26bfa89e
JA
10617 if (ctx->mm_account)
10618 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
a087e2b5
BM
10619
10620 return 0;
10621}
10622
2b188cc1
JA
10623static void io_mem_free(void *ptr)
10624{
52e04ef4
MR
10625 struct page *page;
10626
10627 if (!ptr)
10628 return;
2b188cc1 10629
52e04ef4 10630 page = virt_to_head_page(ptr);
2b188cc1
JA
10631 if (put_page_testzero(page))
10632 free_compound_page(page);
10633}
10634
10635static void *io_mem_alloc(size_t size)
10636{
0a3f1e0b 10637 gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
2b188cc1 10638
0a3f1e0b 10639 return (void *) __get_free_pages(gfp, get_order(size));
2b188cc1
JA
10640}
10641
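/*
 * Size of the shared ring mapping: the io_rings header plus the CQE array
 * (doubled for IORING_SETUP_CQE32), cache-line aligned on SMP, followed by
 * the SQ index array whose offset is returned through *sq_offset.
 */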
baf9cb64
SR
10642static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
10643 unsigned int cq_entries, size_t *sq_offset)
75b28aff
HV
10644{
10645 struct io_rings *rings;
10646 size_t off, sq_array_size;
10647
10648 off = struct_size(rings, cqes, cq_entries);
10649 if (off == SIZE_MAX)
10650 return SIZE_MAX;
baf9cb64
SR
10651 if (ctx->flags & IORING_SETUP_CQE32) {
10652 if (check_shl_overflow(off, 1, &off))
10653 return SIZE_MAX;
10654 }
75b28aff
HV
10655
10656#ifdef CONFIG_SMP
10657 off = ALIGN(off, SMP_CACHE_BYTES);
10658 if (off == 0)
10659 return SIZE_MAX;
10660#endif
10661
b36200f5
DV
10662 if (sq_offset)
10663 *sq_offset = off;
10664
75b28aff
HV
10665 sq_array_size = array_size(sizeof(u32), sq_entries);
10666 if (sq_array_size == SIZE_MAX)
10667 return SIZE_MAX;
10668
10669 if (check_add_overflow(off, sq_array_size, &off))
10670 return SIZE_MAX;
10671
75b28aff
HV
10672 return off;
10673}
10674
41edf1a5 10675static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
7f61a1e9 10676{
41edf1a5 10677 struct io_mapped_ubuf *imu = *slot;
7f61a1e9
PB
10678 unsigned int i;
10679
6224843d
PB
10680 if (imu != ctx->dummy_ubuf) {
10681 for (i = 0; i < imu->nr_bvecs; i++)
10682 unpin_user_page(imu->bvec[i].bv_page);
10683 if (imu->acct_pages)
10684 io_unaccount_mem(ctx, imu->acct_pages);
10685 kvfree(imu);
10686 }
41edf1a5 10687 *slot = NULL;
7f61a1e9
PB
10688}
10689
bd54b6fe 10690static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
edafccee 10691{
634d00df
PB
10692 io_buffer_unmap(ctx, &prsrc->buf);
10693 prsrc->buf = NULL;
bd54b6fe 10694}
edafccee 10695
bd54b6fe
BM
10696static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
10697{
10698 unsigned int i;
edafccee 10699
7f61a1e9
PB
10700 for (i = 0; i < ctx->nr_user_bufs; i++)
10701 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
edafccee 10702 kfree(ctx->user_bufs);
bb6659cc 10703 io_rsrc_data_free(ctx->buf_data);
edafccee 10704 ctx->user_bufs = NULL;
bd54b6fe 10705 ctx->buf_data = NULL;
edafccee 10706 ctx->nr_user_bufs = 0;
bd54b6fe
BM
10707}
10708
0a96bbe4 10709static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
edafccee 10710{
d11d31fc 10711 unsigned nr = ctx->nr_user_bufs;
bd54b6fe 10712 int ret;
edafccee 10713
bd54b6fe 10714 if (!ctx->buf_data)
edafccee
JA
10715 return -ENXIO;
10716
d11d31fc
PB
10717 /*
 10718	 * Quiesce may unlock ->uring_lock; while it's not held,
 10719	 * prevent new requests from using the table.
10720 */
10721 ctx->nr_user_bufs = 0;
bd54b6fe 10722 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
d11d31fc 10723 ctx->nr_user_bufs = nr;
bd54b6fe
BM
10724 if (!ret)
10725 __io_sqe_buffers_unregister(ctx);
10726 return ret;
edafccee
JA
10727}
10728
10729static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
10730 void __user *arg, unsigned index)
10731{
10732 struct iovec __user *src;
10733
10734#ifdef CONFIG_COMPAT
10735 if (ctx->compat) {
10736 struct compat_iovec __user *ciovs;
10737 struct compat_iovec ciov;
10738
10739 ciovs = (struct compat_iovec __user *) arg;
10740 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
10741 return -EFAULT;
10742
d55e5f5b 10743 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
10744 dst->iov_len = ciov.iov_len;
10745 return 0;
10746 }
10747#endif
10748 src = (struct iovec __user *) arg;
10749 if (copy_from_user(dst, &src[index], sizeof(*dst)))
10750 return -EFAULT;
10751 return 0;
10752}
10753
de293938
JA
10754/*
 10755 * Not super efficient, but this only runs at registration time. And we do cache
10756 * the last compound head, so generally we'll only do a full search if we don't
10757 * match that one.
10758 *
10759 * We check if the given compound head page has already been accounted, to
10760 * avoid double accounting it. This allows us to account the full size of the
10761 * page, not just the constituent pages of a huge page.
10762 */
10763static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
10764 int nr_pages, struct page *hpage)
10765{
10766 int i, j;
10767
10768 /* check current page array */
10769 for (i = 0; i < nr_pages; i++) {
10770 if (!PageCompound(pages[i]))
10771 continue;
10772 if (compound_head(pages[i]) == hpage)
10773 return true;
10774 }
10775
10776 /* check previously registered pages */
10777 for (i = 0; i < ctx->nr_user_bufs; i++) {
41edf1a5 10778 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
de293938
JA
10779
10780 for (j = 0; j < imu->nr_bvecs; j++) {
10781 if (!PageCompound(imu->bvec[j].bv_page))
10782 continue;
10783 if (compound_head(imu->bvec[j].bv_page) == hpage)
10784 return true;
10785 }
10786 }
10787
10788 return false;
10789}
10790
10791static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
10792 int nr_pages, struct io_mapped_ubuf *imu,
10793 struct page **last_hpage)
10794{
10795 int i, ret;
10796
216e5835 10797 imu->acct_pages = 0;
de293938
JA
10798 for (i = 0; i < nr_pages; i++) {
10799 if (!PageCompound(pages[i])) {
10800 imu->acct_pages++;
10801 } else {
10802 struct page *hpage;
10803
10804 hpage = compound_head(pages[i]);
10805 if (hpage == *last_hpage)
10806 continue;
10807 *last_hpage = hpage;
10808 if (headpage_already_acct(ctx, pages, i, hpage))
10809 continue;
10810 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
10811 }
10812 }
10813
10814 if (!imu->acct_pages)
10815 return 0;
10816
26bfa89e 10817 ret = io_account_mem(ctx, imu->acct_pages);
de293938
JA
10818 if (ret)
10819 imu->acct_pages = 0;
10820 return ret;
10821}
10822
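/*
 * Pin the user buffer with FOLL_WRITE | FOLL_LONGTERM. File-backed
 * mappings other than shmem and hugetlbfs are rejected (-EOPNOTSUPP).
 */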
d8c2237d
JA
10823static struct page **io_pin_pages(unsigned long ubuf, unsigned long len,
10824 int *npages)
edafccee 10825{
d8c2237d 10826 unsigned long start, end, nr_pages;
edafccee
JA
10827 struct vm_area_struct **vmas = NULL;
10828 struct page **pages = NULL;
d8c2237d 10829 int i, pret, ret = -ENOMEM;
6224843d 10830
d8c2237d 10831 end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
0a96bbe4
BM
10832 start = ubuf >> PAGE_SHIFT;
10833 nr_pages = end - start;
10834
0a96bbe4
BM
10835 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
10836 if (!pages)
10837 goto done;
10838
10839 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
10840 GFP_KERNEL);
10841 if (!vmas)
10842 goto done;
edafccee 10843
0a96bbe4
BM
10844 ret = 0;
10845 mmap_read_lock(current->mm);
10846 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
10847 pages, vmas);
10848 if (pret == nr_pages) {
10849 /* don't support file backed memory */
10850 for (i = 0; i < nr_pages; i++) {
10851 struct vm_area_struct *vma = vmas[i];
10852
40dad765
PB
10853 if (vma_is_shmem(vma))
10854 continue;
0a96bbe4
BM
10855 if (vma->vm_file &&
10856 !is_file_hugepages(vma->vm_file)) {
10857 ret = -EOPNOTSUPP;
10858 break;
10859 }
10860 }
d8c2237d 10861 *npages = nr_pages;
0a96bbe4
BM
10862 } else {
10863 ret = pret < 0 ? pret : -EFAULT;
10864 }
10865 mmap_read_unlock(current->mm);
10866 if (ret) {
10867 /*
10868 * if we did partial map, or found file backed vmas,
10869 * release any pages we did get
10870 */
10871 if (pret > 0)
10872 unpin_user_pages(pages, pret);
0a96bbe4
BM
10873 goto done;
10874 }
d8c2237d
JA
10875 ret = 0;
10876done:
10877 kvfree(vmas);
10878 if (ret < 0) {
10879 kvfree(pages);
10880 pages = ERR_PTR(ret);
10881 }
10882 return pages;
10883}
0a96bbe4 10884
d8c2237d
JA
10885static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
10886 struct io_mapped_ubuf **pimu,
10887 struct page **last_hpage)
10888{
10889 struct io_mapped_ubuf *imu = NULL;
10890 struct page **pages = NULL;
10891 unsigned long off;
10892 size_t size;
10893 int ret, nr_pages, i;
10894
10895 if (!iov->iov_base) {
10896 *pimu = ctx->dummy_ubuf;
10897 return 0;
10898 }
10899
10900 *pimu = NULL;
10901 ret = -ENOMEM;
10902
10903 pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
10904 &nr_pages);
10905 if (IS_ERR(pages)) {
10906 ret = PTR_ERR(pages);
10907 pages = NULL;
10908 goto done;
10909 }
10910
10911 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
10912 if (!imu)
10913 goto done;
0a96bbe4 10914
d8c2237d 10915 ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
0a96bbe4 10916 if (ret) {
d8c2237d 10917 unpin_user_pages(pages, nr_pages);
0a96bbe4
BM
10918 goto done;
10919 }
10920
d8c2237d 10921 off = (unsigned long) iov->iov_base & ~PAGE_MASK;
0a96bbe4
BM
10922 size = iov->iov_len;
10923 for (i = 0; i < nr_pages; i++) {
10924 size_t vec_len;
10925
10926 vec_len = min_t(size_t, size, PAGE_SIZE - off);
10927 imu->bvec[i].bv_page = pages[i];
10928 imu->bvec[i].bv_len = vec_len;
10929 imu->bvec[i].bv_offset = off;
10930 off = 0;
10931 size -= vec_len;
10932 }
10933 /* store original address for later verification */
d8c2237d
JA
10934 imu->ubuf = (unsigned long) iov->iov_base;
10935 imu->ubuf_end = imu->ubuf + iov->iov_len;
0a96bbe4 10936 imu->nr_bvecs = nr_pages;
41edf1a5 10937 *pimu = imu;
0a96bbe4
BM
10938 ret = 0;
10939done:
41edf1a5
PB
10940 if (ret)
10941 kvfree(imu);
0a96bbe4 10942 kvfree(pages);
0a96bbe4
BM
10943 return ret;
10944}
10945
2b358604 10946static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
0a96bbe4 10947{
87094465
PB
10948 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
10949 return ctx->user_bufs ? 0 : -ENOMEM;
2b358604 10950}
edafccee 10951
2b358604
BM
10952static int io_buffer_validate(struct iovec *iov)
10953{
50e96989
PB
10954 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
10955
2b358604
BM
10956 /*
10957 * Don't impose further limits on the size and buffer
10958 * constraints here, we'll -EINVAL later when IO is
10959 * submitted if they are wrong.
10960 */
6224843d
PB
10961 if (!iov->iov_base)
10962 return iov->iov_len ? -EFAULT : 0;
10963 if (!iov->iov_len)
2b358604 10964 return -EFAULT;
edafccee 10965
2b358604
BM
10966 /* arbitrary limit, but we need something */
10967 if (iov->iov_len > SZ_1G)
10968 return -EFAULT;
edafccee 10969
50e96989
PB
10970 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
10971 return -EOVERFLOW;
10972
2b358604
BM
10973 return 0;
10974}
edafccee 10975
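/*
 * IORING_REGISTER_BUFFERS(2): up to IORING_MAX_REG_BUFFERS iovecs may be
 * registered, each at most 1GB; a NULL iov_base registers a sparse slot
 * backed by the shared dummy_ubuf.
 */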
2b358604 10976static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
634d00df 10977 unsigned int nr_args, u64 __user *tags)
2b358604 10978{
bd54b6fe
BM
10979 struct page *last_hpage = NULL;
10980 struct io_rsrc_data *data;
2b358604
BM
10981 int i, ret;
10982 struct iovec iov;
edafccee 10983
87094465
PB
10984 if (ctx->user_bufs)
10985 return -EBUSY;
489809e2 10986 if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
87094465 10987 return -EINVAL;
bd54b6fe 10988 ret = io_rsrc_node_switch_start(ctx);
2b358604
BM
10989 if (ret)
10990 return ret;
d878c816
PB
10991 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
10992 if (ret)
10993 return ret;
bd54b6fe
BM
10994 ret = io_buffers_map_alloc(ctx, nr_args);
10995 if (ret) {
bb6659cc 10996 io_rsrc_data_free(data);
bd54b6fe
BM
10997 return ret;
10998 }
edafccee 10999
87094465 11000 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
0184f08e
PB
11001 if (arg) {
11002 ret = io_copy_iov(ctx, &iov, arg, i);
11003 if (ret)
11004 break;
11005 ret = io_buffer_validate(&iov);
11006 if (ret)
11007 break;
11008 } else {
11009 memset(&iov, 0, sizeof(iov));
11010 }
11011
2d091d62 11012 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
cf3770e7
CIK
11013 ret = -EINVAL;
11014 break;
11015 }
edafccee 11016
41edf1a5
PB
11017 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
11018 &last_hpage);
0a96bbe4
BM
11019 if (ret)
11020 break;
edafccee 11021 }
0a96bbe4 11022
bd54b6fe 11023 WARN_ON_ONCE(ctx->buf_data);
0a96bbe4 11024
bd54b6fe
BM
11025 ctx->buf_data = data;
11026 if (ret)
11027 __io_sqe_buffers_unregister(ctx);
11028 else
11029 io_rsrc_node_switch(ctx, NULL);
edafccee
JA
11030 return ret;
11031}
11032
634d00df
PB
11033static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
11034 struct io_uring_rsrc_update2 *up,
11035 unsigned int nr_args)
11036{
11037 u64 __user *tags = u64_to_user_ptr(up->tags);
11038 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
634d00df
PB
11039 struct page *last_hpage = NULL;
11040 bool needs_switch = false;
11041 __u32 done;
11042 int i, err;
11043
11044 if (!ctx->buf_data)
11045 return -ENXIO;
11046 if (up->offset + nr_args > ctx->nr_user_bufs)
11047 return -EINVAL;
11048
11049 for (done = 0; done < nr_args; done++) {
0b8c0e7c
PB
11050 struct io_mapped_ubuf *imu;
11051 int offset = up->offset + done;
634d00df
PB
11052 u64 tag = 0;
11053
11054 err = io_copy_iov(ctx, &iov, iovs, done);
11055 if (err)
11056 break;
11057 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
11058 err = -EFAULT;
11059 break;
11060 }
0b8c0e7c
PB
11061 err = io_buffer_validate(&iov);
11062 if (err)
11063 break;
cf3770e7
CIK
11064 if (!iov.iov_base && tag) {
11065 err = -EINVAL;
11066 break;
11067 }
0b8c0e7c
PB
11068 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
11069 if (err)
11070 break;
634d00df 11071
0b8c0e7c 11072 i = array_index_nospec(offset, ctx->nr_user_bufs);
6224843d 11073 if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
4cdd158b 11074 err = io_queue_rsrc_removal(ctx->buf_data, i,
0b8c0e7c
PB
11075 ctx->rsrc_node, ctx->user_bufs[i]);
11076 if (unlikely(err)) {
11077 io_buffer_unmap(ctx, &imu);
634d00df 11078 break;
0b8c0e7c 11079 }
634d00df
PB
11080 ctx->user_bufs[i] = NULL;
11081 needs_switch = true;
11082 }
11083
0b8c0e7c 11084 ctx->user_bufs[i] = imu;
2d091d62 11085 *io_get_tag_slot(ctx->buf_data, offset) = tag;
634d00df
PB
11086 }
11087
11088 if (needs_switch)
11089 io_rsrc_node_switch(ctx, ctx->buf_data);
11090 return done ? done : err;
11091}
11092
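/*
 * Eventfd (un)registration: the eventfd context lives behind an
 * RCU-protected pointer so completion paths can signal it without holding
 * the uring_lock; unregistering frees it via call_rcu().
 */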
c75312dd
UA
11093static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
11094 unsigned int eventfd_async)
9b402849 11095{
77bc59b4 11096 struct io_ev_fd *ev_fd;
9b402849 11097 __s32 __user *fds = arg;
f0a4e62b 11098 int fd;
9b402849 11099
77bc59b4
UA
11100 ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
11101 lockdep_is_held(&ctx->uring_lock));
11102 if (ev_fd)
9b402849
JA
11103 return -EBUSY;
11104
11105 if (copy_from_user(&fd, fds, sizeof(*fds)))
11106 return -EFAULT;
11107
77bc59b4
UA
11108 ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
11109 if (!ev_fd)
11110 return -ENOMEM;
fe7e3257 11111
77bc59b4
UA
11112 ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
11113 if (IS_ERR(ev_fd->cq_ev_fd)) {
f0a4e62b 11114 int ret = PTR_ERR(ev_fd->cq_ev_fd);
77bc59b4 11115 kfree(ev_fd);
9b402849
JA
11116 return ret;
11117 }
c75312dd 11118 ev_fd->eventfd_async = eventfd_async;
9aa8dfde 11119 ctx->has_evfd = true;
77bc59b4 11120 rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
f0a4e62b 11121 return 0;
77bc59b4
UA
11122}
11123
11124static void io_eventfd_put(struct rcu_head *rcu)
11125{
11126 struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
11127
11128 eventfd_ctx_put(ev_fd->cq_ev_fd);
11129 kfree(ev_fd);
9b402849
JA
11130}
11131
11132static int io_eventfd_unregister(struct io_ring_ctx *ctx)
11133{
77bc59b4
UA
11134 struct io_ev_fd *ev_fd;
11135
11136 ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
11137 lockdep_is_held(&ctx->uring_lock));
11138 if (ev_fd) {
9aa8dfde 11139 ctx->has_evfd = false;
77bc59b4
UA
11140 rcu_assign_pointer(ctx->io_ev_fd, NULL);
11141 call_rcu(&ev_fd->rcu, io_eventfd_put);
9b402849
JA
11142 return 0;
11143 }
11144
11145 return -ENXIO;
11146}
11147
5a2e745d
JA
11148static void io_destroy_buffers(struct io_ring_ctx *ctx)
11149{
9cfc7e94
JA
11150 struct io_buffer_list *bl;
11151 unsigned long index;
dbc7d452
JA
11152 int i;
11153
9cfc7e94
JA
11154 for (i = 0; i < BGID_ARRAY; i++) {
11155 if (!ctx->io_bl)
11156 break;
11157 __io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
11158 }
dbc7d452 11159
9cfc7e94
JA
11160 xa_for_each(&ctx->io_bl_xa, index, bl) {
11161 xa_erase(&ctx->io_bl_xa, bl->bgid);
11162 __io_remove_buffers(ctx, bl, -1U);
21870e02 11163 kfree(bl);
dbc7d452 11164 }
cc3cec83
JA
11165
11166 while (!list_empty(&ctx->io_buffers_pages)) {
11167 struct page *page;
11168
11169 page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
11170 list_del_init(&page->lru);
11171 __free_page(page);
11172 }
5a2e745d
JA
11173}
11174
4010fec4 11175static void io_req_caches_free(struct io_ring_ctx *ctx)
2b188cc1 11176{
cd0ca2e0 11177 struct io_submit_state *state = &ctx->submit_state;
37f0e767 11178 int nr = 0;
bf019da7 11179
9a4fdbd8 11180 mutex_lock(&ctx->uring_lock);
cd0ca2e0 11181 io_flush_cached_locked_reqs(ctx, state);
9a4fdbd8 11182
88ab95be 11183 while (!io_req_cache_empty(ctx)) {
c2b6c6bc
PB
11184 struct io_wq_work_node *node;
11185 struct io_kiocb *req;
9a4fdbd8 11186
c2b6c6bc
PB
11187 node = wq_stack_extract(&state->free_list);
11188 req = container_of(node, struct io_kiocb, comp_list);
11189 kmem_cache_free(req_cachep, req);
37f0e767 11190 nr++;
c2b6c6bc 11191 }
37f0e767
PB
11192 if (nr)
11193 percpu_ref_put_many(&ctx->refs, nr);
9a4fdbd8
JA
11194 mutex_unlock(&ctx->uring_lock);
11195}
11196
43597aac 11197static void io_wait_rsrc_data(struct io_rsrc_data *data)
2b188cc1 11198{
43597aac 11199 if (data && !atomic_dec_and_test(&data->refs))
bd54b6fe 11200 wait_for_completion(&data->done);
bd54b6fe 11201}
04fc6c80 11202
4d9237e3
JA
11203static void io_flush_apoll_cache(struct io_ring_ctx *ctx)
11204{
11205 struct async_poll *apoll;
11206
11207 while (!list_empty(&ctx->apoll_cache)) {
11208 apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
11209 poll.wait.entry);
11210 list_del(&apoll->poll.wait.entry);
11211 kfree(apoll);
11212 }
11213}
11214
c072481d 11215static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
2b188cc1 11216{
37d1e2e3 11217 io_sq_thread_finish(ctx);
2aede0e4 11218
37d1e2e3 11219 if (ctx->mm_account) {
2aede0e4
JA
11220 mmdrop(ctx->mm_account);
11221 ctx->mm_account = NULL;
30975825 11222 }
def596e9 11223
ab409402 11224 io_rsrc_refs_drop(ctx);
43597aac
PB
11225 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
11226 io_wait_rsrc_data(ctx->buf_data);
11227 io_wait_rsrc_data(ctx->file_data);
11228
8bad28d8 11229 mutex_lock(&ctx->uring_lock);
43597aac 11230 if (ctx->buf_data)
bd54b6fe 11231 __io_sqe_buffers_unregister(ctx);
43597aac 11232 if (ctx->file_data)
08480400 11233 __io_sqe_files_unregister(ctx);
c4ea060e
PB
11234 if (ctx->rings)
11235 __io_cqring_overflow_flush(ctx, true);
9b402849 11236 io_eventfd_unregister(ctx);
4d9237e3 11237 io_flush_apoll_cache(ctx);
77bc59b4 11238 mutex_unlock(&ctx->uring_lock);
5a2e745d 11239 io_destroy_buffers(ctx);
07db298a
PB
11240 if (ctx->sq_creds)
11241 put_cred(ctx->sq_creds);
def596e9 11242
a7f0ed5a
PB
11243 /* there are no registered resources left, nobody uses it */
11244 if (ctx->rsrc_node)
11245 io_rsrc_node_destroy(ctx->rsrc_node);
8dd03afe 11246 if (ctx->rsrc_backup_node)
b895c9a6 11247 io_rsrc_node_destroy(ctx->rsrc_backup_node);
a7f0ed5a 11248 flush_delayed_work(&ctx->rsrc_put_work);
756ab7c0 11249 flush_delayed_work(&ctx->fallback_work);
a7f0ed5a
PB
11250
11251 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
11252 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
def596e9 11253
2b188cc1 11254#if defined(CONFIG_UNIX)
355e8d26
EB
11255 if (ctx->ring_sock) {
11256 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 11257 sock_release(ctx->ring_sock);
355e8d26 11258 }
2b188cc1 11259#endif
ef9dd637 11260 WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
2b188cc1 11261
75b28aff 11262 io_mem_free(ctx->rings);
2b188cc1 11263 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
11264
11265 percpu_ref_exit(&ctx->refs);
2b188cc1 11266 free_uid(ctx->user);
4010fec4 11267 io_req_caches_free(ctx);
e941894e
JA
11268 if (ctx->hash_map)
11269 io_wq_put_hash(ctx->hash_map);
78076bb6 11270 kfree(ctx->cancel_hash);
6224843d 11271 kfree(ctx->dummy_ubuf);
9cfc7e94
JA
11272 kfree(ctx->io_bl);
11273 xa_destroy(&ctx->io_bl_xa);
2b188cc1
JA
11274 kfree(ctx);
11275}
11276
11277static __poll_t io_uring_poll(struct file *file, poll_table *wait)
11278{
11279 struct io_ring_ctx *ctx = file->private_data;
11280 __poll_t mask = 0;
11281
d60aa65b 11282 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
11283 /*
11284 * synchronizes with barrier from wq_has_sleeper call in
11285 * io_commit_cqring
11286 */
2b188cc1 11287 smp_rmb();
90554200 11288 if (!io_sqring_full(ctx))
2b188cc1 11289 mask |= EPOLLOUT | EPOLLWRNORM;
ed670c3f
HX
11290
11291 /*
11292 * Don't flush cqring overflow list here, just do a simple check.
 11293	 * Otherwise there could possibly be an ABBA deadlock:
11294 * CPU0 CPU1
11295 * ---- ----
11296 * lock(&ctx->uring_lock);
11297 * lock(&ep->mtx);
11298 * lock(&ctx->uring_lock);
11299 * lock(&ep->mtx);
11300 *
 11301	 * Users may get EPOLLIN while seeing nothing in the cqring; this
 11302	 * pushes them to do the flush.
11303 */
10988a0a
DY
11304 if (io_cqring_events(ctx) ||
11305 test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
2b188cc1
JA
11306 mask |= EPOLLIN | EPOLLRDNORM;
11307
11308 return mask;
11309}
11310
0bead8cd 11311static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
071698e1 11312{
4379bf8b 11313 const struct cred *creds;
071698e1 11314
61cf9370 11315 creds = xa_erase(&ctx->personalities, id);
4379bf8b
JA
11316 if (creds) {
11317 put_cred(creds);
0bead8cd 11318 return 0;
1e6fa521 11319 }
0bead8cd
YD
11320
11321 return -EINVAL;
11322}
11323
d56d938b
PB
11324struct io_tctx_exit {
11325 struct callback_head task_work;
11326 struct completion completion;
baf186c4 11327 struct io_ring_ctx *ctx;
d56d938b
PB
11328};
11329
c072481d 11330static __cold void io_tctx_exit_cb(struct callback_head *cb)
d56d938b
PB
11331{
11332 struct io_uring_task *tctx = current->io_uring;
11333 struct io_tctx_exit *work;
11334
11335 work = container_of(cb, struct io_tctx_exit, task_work);
11336 /*
11337 * When @in_idle, we're in cancellation and it's racy to remove the
11338 * node. It'll be removed by the end of cancellation, just ignore it.
11339 */
11340 if (!atomic_read(&tctx->in_idle))
eef51daa 11341 io_uring_del_tctx_node((unsigned long)work->ctx);
d56d938b
PB
11342 complete(&work->completion);
11343}
11344
c072481d 11345static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
28090c13
PB
11346{
11347 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
11348
11349 return req->ctx == data;
11350}
11351
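/*
 * Ring teardown: keep cancelling outstanding requests (including SQPOLL
 * and io-wq work) until all ctx references are dropped, then ask each
 * task holding a tctx node for this ring to unhook it via task_work
 * before the ctx is finally freed.
 */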
c072481d 11352static __cold void io_ring_exit_work(struct work_struct *work)
85faa7b8 11353{
d56d938b 11354 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
b5bb3a24 11355 unsigned long timeout = jiffies + HZ * 60 * 5;
58d3be2c 11356 unsigned long interval = HZ / 20;
d56d938b
PB
11357 struct io_tctx_exit exit;
11358 struct io_tctx_node *node;
11359 int ret;
85faa7b8 11360
56952e91
JA
11361 /*
11362 * If we're doing polled IO and end up having requests being
11363 * submitted async (out-of-line), then completions can come in while
11364 * we're waiting for refs to drop. We need to reap these manually,
11365 * as nobody else will be looking for them.
11366 */
b2edc0a7 11367 do {
3dd0c97a 11368 io_uring_try_cancel_requests(ctx, NULL, true);
28090c13
PB
11369 if (ctx->sq_data) {
11370 struct io_sq_data *sqd = ctx->sq_data;
11371 struct task_struct *tsk;
11372
11373 io_sq_thread_park(sqd);
11374 tsk = sqd->thread;
11375 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
11376 io_wq_cancel_cb(tsk->io_uring->io_wq,
11377 io_cancel_ctx_cb, ctx, true);
11378 io_sq_thread_unpark(sqd);
11379 }
b5bb3a24 11380
37f0e767
PB
11381 io_req_caches_free(ctx);
11382
58d3be2c
PB
11383 if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
11384 /* there is little hope left, don't run it too often */
11385 interval = HZ * 60;
11386 }
11387 } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
d56d938b 11388
7f00651a
PB
11389 init_completion(&exit.completion);
11390 init_task_work(&exit.task_work, io_tctx_exit_cb);
11391 exit.ctx = ctx;
89b5066e
PB
11392 /*
 11393	 * Some may use the context even when all refs and requests have been put,
11394 * and they are free to do so while still holding uring_lock or
5b0a6acc 11395 * completion_lock, see io_req_task_submit(). Apart from other work,
89b5066e
PB
 11396	 * this lock/unlock section also waits for them to finish.
11397 */
d56d938b
PB
11398 mutex_lock(&ctx->uring_lock);
11399 while (!list_empty(&ctx->tctx_list)) {
b5bb3a24
PB
11400 WARN_ON_ONCE(time_after(jiffies, timeout));
11401
d56d938b
PB
11402 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
11403 ctx_node);
7f00651a
PB
11404 /* don't spin on a single task if cancellation failed */
11405 list_rotate_left(&ctx->tctx_list);
d56d938b
PB
11406 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
11407 if (WARN_ON_ONCE(ret))
11408 continue;
d56d938b
PB
11409
11410 mutex_unlock(&ctx->uring_lock);
11411 wait_for_completion(&exit.completion);
d56d938b
PB
11412 mutex_lock(&ctx->uring_lock);
11413 }
11414 mutex_unlock(&ctx->uring_lock);
79ebeaee
JA
11415 spin_lock(&ctx->completion_lock);
11416 spin_unlock(&ctx->completion_lock);
d56d938b 11417
85faa7b8
JA
11418 io_ring_ctx_free(ctx);
11419}
11420
80c4cbdb 11421/* Returns true if we found and killed one or more timeouts */
c072481d
PB
11422static __cold bool io_kill_timeouts(struct io_ring_ctx *ctx,
11423 struct task_struct *tsk, bool cancel_all)
80c4cbdb
PB
11424{
11425 struct io_kiocb *req, *tmp;
11426 int canceled = 0;
11427
79ebeaee
JA
11428 spin_lock(&ctx->completion_lock);
11429 spin_lock_irq(&ctx->timeout_lock);
80c4cbdb 11430 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
3dd0c97a 11431 if (io_match_task(req, tsk, cancel_all)) {
80c4cbdb
PB
11432 io_kill_timeout(req, -ECANCELED);
11433 canceled++;
11434 }
11435 }
79ebeaee 11436 spin_unlock_irq(&ctx->timeout_lock);
60053be8 11437 io_commit_cqring(ctx);
79ebeaee 11438 spin_unlock(&ctx->completion_lock);
80c4cbdb
PB
11439 if (canceled != 0)
11440 io_cqring_ev_posted(ctx);
11441 return canceled != 0;
11442}
11443
c072481d 11444static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
2b188cc1 11445{
61cf9370
MWO
11446 unsigned long index;
11447 struct creds *creds;
11448
2b188cc1
JA
11449 mutex_lock(&ctx->uring_lock);
11450 percpu_ref_kill(&ctx->refs);
634578f8 11451 if (ctx->rings)
6c2450ae 11452 __io_cqring_overflow_flush(ctx, true);
61cf9370
MWO
11453 xa_for_each(&ctx->personalities, index, creds)
11454 io_unregister_personality(ctx, index);
2b188cc1
JA
11455 mutex_unlock(&ctx->uring_lock);
11456
60053be8
PB
11457 /* failed during ring init, it couldn't have issued any requests */
11458 if (ctx->rings) {
11459 io_kill_timeouts(ctx, NULL, true);
11460 io_poll_remove_all(ctx, NULL, true);
11461 /* if we failed setting up the ctx, we might not have any rings */
11462 io_iopoll_try_reap_events(ctx);
11463 }
309fc03a 11464
85faa7b8 11465 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
fc666777
JA
11466 /*
11467 * Use system_unbound_wq to avoid spawning tons of event kworkers
11468 * if we're exiting a ton of rings at the same time. It just adds
 11469	 * noise and overhead; there's no discernible change in runtime
11470 * over using system_wq.
11471 */
11472 queue_work(system_unbound_wq, &ctx->exit_work);
2b188cc1
JA
11473}
11474
11475static int io_uring_release(struct inode *inode, struct file *file)
11476{
11477 struct io_ring_ctx *ctx = file->private_data;
11478
11479 file->private_data = NULL;
11480 io_ring_ctx_wait_and_kill(ctx);
11481 return 0;
11482}
11483
f6edbabb
PB
11484struct io_task_cancel {
11485 struct task_struct *task;
3dd0c97a 11486 bool all;
f6edbabb 11487};
f254ac04 11488
f6edbabb 11489static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
b711d4ea 11490{
9a472ef7 11491 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
f6edbabb 11492 struct io_task_cancel *cancel = data;
9a472ef7 11493
6af3f48b 11494 return io_match_task_safe(req, cancel->task, cancel->all);
b711d4ea
JA
11495}
11496
c072481d
PB
11497static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
11498 struct task_struct *task,
11499 bool cancel_all)
b7ddce3c 11500{
e1915f76 11501 struct io_defer_entry *de;
b7ddce3c
PB
11502 LIST_HEAD(list);
11503
79ebeaee 11504 spin_lock(&ctx->completion_lock);
b7ddce3c 11505 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
6af3f48b 11506 if (io_match_task_safe(de->req, task, cancel_all)) {
b7ddce3c
PB
11507 list_cut_position(&list, &ctx->defer_list, &de->list);
11508 break;
11509 }
11510 }
79ebeaee 11511 spin_unlock(&ctx->completion_lock);
e1915f76
PB
11512 if (list_empty(&list))
11513 return false;
b7ddce3c
PB
11514
11515 while (!list_empty(&list)) {
11516 de = list_first_entry(&list, struct io_defer_entry, list);
11517 list_del_init(&de->list);
f41db273 11518 io_req_complete_failed(de->req, -ECANCELED);
b7ddce3c
PB
11519 kfree(de);
11520 }
e1915f76 11521 return true;
b7ddce3c
PB
11522}
11523
c072481d 11524static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
1b00764f
PB
11525{
11526 struct io_tctx_node *node;
11527 enum io_wq_cancel cret;
11528 bool ret = false;
11529
11530 mutex_lock(&ctx->uring_lock);
11531 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
11532 struct io_uring_task *tctx = node->task->io_uring;
11533
11534 /*
11535 * io_wq will stay alive while we hold uring_lock, because it's
 11536	 * killed after ctx nodes, which requires taking the lock.
11537 */
11538 if (!tctx || !tctx->io_wq)
11539 continue;
11540 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
11541 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
11542 }
11543 mutex_unlock(&ctx->uring_lock);
11544
11545 return ret;
11546}
11547
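/*
 * Cancel everything the given task (or, with task == NULL, anything at
 * all) has pending on this ring: io-wq work, deferred (drain) requests,
 * poll requests and timeouts, retrying until a pass makes no progress.
 */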
c072481d
PB
11548static __cold void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
11549 struct task_struct *task,
11550 bool cancel_all)
9936c7c2 11551{
3dd0c97a 11552 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
1b00764f 11553 struct io_uring_task *tctx = task ? task->io_uring : NULL;
9936c7c2 11554
60053be8
PB
11555 /* failed during ring init, it couldn't have issued any requests */
11556 if (!ctx->rings)
11557 return;
11558
9936c7c2
PB
11559 while (1) {
11560 enum io_wq_cancel cret;
11561 bool ret = false;
11562
1b00764f
PB
11563 if (!task) {
11564 ret |= io_uring_try_cancel_iowq(ctx);
11565 } else if (tctx && tctx->io_wq) {
11566 /*
11567 * Cancels requests of all rings, not only @ctx, but
11568 * it's fine as the task is in exit/exec.
11569 */
5aa75ed5 11570 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
9936c7c2
PB
11571 &cancel, true);
11572 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
11573 }
11574
11575 /* SQPOLL thread does its own polling */
3dd0c97a 11576 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
d052d1d6 11577 (ctx->sq_data && ctx->sq_data->thread == current)) {
5eef4e87 11578 while (!wq_list_empty(&ctx->iopoll_list)) {
9936c7c2
PB
11579 io_iopoll_try_reap_events(ctx);
11580 ret = true;
11581 }
11582 }
11583
3dd0c97a
PB
11584 ret |= io_cancel_defer_files(ctx, task, cancel_all);
11585 ret |= io_poll_remove_all(ctx, task, cancel_all);
11586 ret |= io_kill_timeouts(ctx, task, cancel_all);
e5dc480d
PB
11587 if (task)
11588 ret |= io_run_task_work();
9936c7c2
PB
11589 if (!ret)
11590 break;
11591 cond_resched();
11592 }
11593}
11594
eef51daa 11595static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
0f212204 11596{
236434c3 11597 struct io_uring_task *tctx = current->io_uring;
13bf43f5 11598 struct io_tctx_node *node;
a528b04e 11599 int ret;
236434c3
MWO
11600
11601 if (unlikely(!tctx)) {
5aa75ed5 11602 ret = io_uring_alloc_task_context(current, ctx);
0f212204
JA
11603 if (unlikely(ret))
11604 return ret;
e139a1ec 11605
236434c3 11606 tctx = current->io_uring;
e139a1ec
PB
11607 if (ctx->iowq_limits_set) {
11608 unsigned int limits[2] = { ctx->iowq_limits[0],
11609 ctx->iowq_limits[1], };
11610
11611 ret = io_wq_max_workers(tctx->io_wq, limits);
11612 if (ret)
11613 return ret;
11614 }
0f212204 11615 }
cf27f3b1
PB
11616 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
11617 node = kmalloc(sizeof(*node), GFP_KERNEL);
11618 if (!node)
11619 return -ENOMEM;
11620 node->ctx = ctx;
11621 node->task = current;
13bf43f5 11622
cf27f3b1
PB
11623 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
11624 node, GFP_KERNEL));
11625 if (ret) {
11626 kfree(node);
11627 return ret;
0f212204 11628 }
cf27f3b1
PB
11629
11630 mutex_lock(&ctx->uring_lock);
11631 list_add(&node->ctx_node, &ctx->tctx_list);
11632 mutex_unlock(&ctx->uring_lock);
0f212204 11633 }
cf27f3b1 11634 tctx->last = ctx;
0f212204
JA
11635 return 0;
11636}
11637
cf27f3b1
PB
11638/*
11639 * Note that this task has used io_uring. We use it for cancelation purposes.
11640 */
eef51daa 11641static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
cf27f3b1
PB
11642{
11643 struct io_uring_task *tctx = current->io_uring;
11644
11645 if (likely(tctx && tctx->last == ctx))
11646 return 0;
eef51daa 11647 return __io_uring_add_tctx_node(ctx);
cf27f3b1
PB
11648}
11649
0f212204
JA
11650/*
11651 * Remove this io_uring_file -> task mapping.
11652 */
c072481d 11653static __cold void io_uring_del_tctx_node(unsigned long index)
0f212204
JA
11654{
11655 struct io_uring_task *tctx = current->io_uring;
13bf43f5 11656 struct io_tctx_node *node;
2941267b 11657
eebd2e37
PB
11658 if (!tctx)
11659 return;
13bf43f5
PB
11660 node = xa_erase(&tctx->xa, index);
11661 if (!node)
2941267b 11662 return;
0f212204 11663
13bf43f5
PB
11664 WARN_ON_ONCE(current != node->task);
11665 WARN_ON_ONCE(list_empty(&node->ctx_node));
11666
11667 mutex_lock(&node->ctx->uring_lock);
11668 list_del(&node->ctx_node);
11669 mutex_unlock(&node->ctx->uring_lock);
11670
baf186c4 11671 if (tctx->last == node->ctx)
0f212204 11672 tctx->last = NULL;
13bf43f5 11673 kfree(node);
0f212204
JA
11674}
11675
c072481d 11676static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
de7f1d9e 11677{
ba5ef6dc 11678 struct io_wq *wq = tctx->io_wq;
13bf43f5 11679 struct io_tctx_node *node;
de7f1d9e
PB
11680 unsigned long index;
11681
8bab4c09 11682 xa_for_each(&tctx->xa, index, node) {
eef51daa 11683 io_uring_del_tctx_node(index);
8bab4c09
JA
11684 cond_resched();
11685 }
b16ef427
ME
11686 if (wq) {
11687 /*
f6f9b278 11688 * Must be after io_uring_del_tctx_node() (removes nodes under
b16ef427
ME
11689 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
11690 */
ba5ef6dc 11691 io_wq_put_and_exit(wq);
dadebc35 11692 tctx->io_wq = NULL;
b16ef427 11693 }
de7f1d9e
PB
11694}
11695
3f48cf18 11696static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
521d6a73 11697{
3f48cf18 11698 if (tracked)
9cae36a0 11699 return atomic_read(&tctx->inflight_tracked);
521d6a73
PB
11700 return percpu_counter_sum(&tctx->inflight);
11701}
11702
78cc687b
PB
11703/*
11704 * Find any io_uring ctx that this task has registered or done IO on, and cancel
78a78060 11706 * requests. @sqd should be non-NULL iff it's an SQPOLL thread cancellation.
78cc687b 11706 */
c072481d
PB
11707static __cold void io_uring_cancel_generic(bool cancel_all,
11708 struct io_sq_data *sqd)
0e9ddb39 11709{
521d6a73 11710 struct io_uring_task *tctx = current->io_uring;
734551df 11711 struct io_ring_ctx *ctx;
0e9ddb39
PB
11712 s64 inflight;
11713 DEFINE_WAIT(wait);
fdaf083c 11714
78cc687b
PB
11715 WARN_ON_ONCE(sqd && sqd->thread != current);
11716
6d042ffb
PO
11717 if (!current->io_uring)
11718 return;
17a91051
PB
11719 if (tctx->io_wq)
11720 io_wq_exit_start(tctx->io_wq);
11721
0e9ddb39
PB
11722 atomic_inc(&tctx->in_idle);
11723 do {
e9dbe221 11724 io_uring_drop_tctx_refs(current);
0e9ddb39 11725 /* read completions before cancelations */
78cc687b 11726 inflight = tctx_inflight(tctx, !cancel_all);
0e9ddb39
PB
11727 if (!inflight)
11728 break;
fdaf083c 11729
78cc687b
PB
11730 if (!sqd) {
11731 struct io_tctx_node *node;
11732 unsigned long index;
0f212204 11733
78cc687b
PB
11734 xa_for_each(&tctx->xa, index, node) {
11735 /* sqpoll task will cancel all its requests */
11736 if (node->ctx->sq_data)
11737 continue;
11738 io_uring_try_cancel_requests(node->ctx, current,
11739 cancel_all);
11740 }
11741 } else {
11742 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
11743 io_uring_try_cancel_requests(ctx, current,
11744 cancel_all);
11745 }
17a91051 11746
78a78060
JA
11747 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
11748 io_run_task_work();
e9dbe221 11749 io_uring_drop_tctx_refs(current);
78a78060 11750
0f212204 11751 /*
a1bb3cd5
PB
11752 * If we've seen completions, retry without waiting. This
11753 * avoids a race where a completion comes in before we did
11754 * prepare_to_wait().
0f212204 11755 */
3dd0c97a 11756 if (inflight == tctx_inflight(tctx, !cancel_all))
a1bb3cd5 11757 schedule();
f57555ed 11758 finish_wait(&tctx->wait, &wait);
d8a6df10 11759 } while (1);
de7f1d9e 11760
8452d4a6 11761 io_uring_clean_tctx(tctx);
3dd0c97a 11762 if (cancel_all) {
3cc7fdb9
PB
11763 /*
11764 * We shouldn't run task_works after cancel, so just leave
11765 * ->in_idle set for normal exit.
11766 */
11767 atomic_dec(&tctx->in_idle);
3f48cf18
PB
11768 /* for exec all current's requests should be gone, kill tctx */
11769 __io_uring_free(current);
11770 }
44e728b8
PB
11771}
11772
f552a27a 11773void __io_uring_cancel(bool cancel_all)
78cc687b 11774{
f552a27a 11775 io_uring_cancel_generic(cancel_all, NULL);
78cc687b
PB
11776}
11777
e7a6c00d
JA
11778void io_uring_unreg_ringfd(void)
11779{
11780 struct io_uring_task *tctx = current->io_uring;
11781 int i;
11782
11783 for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
11784 if (tctx->registered_rings[i]) {
11785 fput(tctx->registered_rings[i]);
11786 tctx->registered_rings[i] = NULL;
11787 }
11788 }
11789}
11790
11791static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
11792 int start, int end)
11793{
11794 struct file *file;
11795 int offset;
11796
11797 for (offset = start; offset < end; offset++) {
11798 offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
11799 if (tctx->registered_rings[offset])
11800 continue;
11801
11802 file = fget(fd);
11803 if (!file) {
11804 return -EBADF;
11805 } else if (file->f_op != &io_uring_fops) {
11806 fput(file);
11807 return -EOPNOTSUPP;
11808 }
11809 tctx->registered_rings[offset] = file;
11810 return offset;
11811 }
11812
11813 return -EBUSY;
11814}
11815
11816/*
11817 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
11818 * invocation. User passes in an array of struct io_uring_rsrc_update
11819 * with ->data set to the ring_fd, and ->offset given for the desired
11820 * index. If no index is desired, application may set ->offset == -1U
11821 * and we'll find an available index. Returns number of entries
11822 * successfully processed, or < 0 on error if none were processed.
11823 */
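/*
 * Rough userspace view (an illustrative sketch, not part of this file;
 * liburing wraps it as io_uring_register_ring_fd()):
 *
 *	struct io_uring_rsrc_update reg = {
 *		.data	= ring_fd,
 *		.offset	= -1U,		// let the kernel pick a slot
 *	};
 *	ret = io_uring_register(ring_fd, IORING_REGISTER_RING_FDS, &reg, 1);
 *
 * On success the chosen index is copied back into reg.offset and can then
 * be passed to io_uring_enter() with IORING_ENTER_REGISTERED_RING in
 * place of the real ring fd.
 */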
11824static int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
11825 unsigned nr_args)
11826{
11827 struct io_uring_rsrc_update __user *arg = __arg;
11828 struct io_uring_rsrc_update reg;
11829 struct io_uring_task *tctx;
11830 int ret, i;
11831
11832 if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
11833 return -EINVAL;
11834
11835 mutex_unlock(&ctx->uring_lock);
11836 ret = io_uring_add_tctx_node(ctx);
11837 mutex_lock(&ctx->uring_lock);
11838 if (ret)
11839 return ret;
11840
11841 tctx = current->io_uring;
11842 for (i = 0; i < nr_args; i++) {
11843 int start, end;
11844
11845 if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
11846 ret = -EFAULT;
11847 break;
11848 }
11849
6fb53cf8
DY
11850 if (reg.resv) {
11851 ret = -EINVAL;
11852 break;
11853 }
11854
e7a6c00d
JA
11855 if (reg.offset == -1U) {
11856 start = 0;
11857 end = IO_RINGFD_REG_MAX;
11858 } else {
11859 if (reg.offset >= IO_RINGFD_REG_MAX) {
11860 ret = -EINVAL;
11861 break;
11862 }
11863 start = reg.offset;
11864 end = start + 1;
11865 }
11866
11867 ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
11868 if (ret < 0)
11869 break;
11870
11871 reg.offset = ret;
11872 if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
11873 fput(tctx->registered_rings[reg.offset]);
11874 tctx->registered_rings[reg.offset] = NULL;
11875 ret = -EFAULT;
11876 break;
11877 }
11878 }
11879
11880 return i ? i : ret;
11881}
11882
11883static int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
11884 unsigned nr_args)
11885{
11886 struct io_uring_rsrc_update __user *arg = __arg;
11887 struct io_uring_task *tctx = current->io_uring;
11888 struct io_uring_rsrc_update reg;
11889 int ret = 0, i;
11890
11891 if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
11892 return -EINVAL;
11893 if (!tctx)
11894 return 0;
11895
11896 for (i = 0; i < nr_args; i++) {
11897 if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
11898 ret = -EFAULT;
11899 break;
11900 }
303cc749 11901 if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
e7a6c00d
JA
11902 ret = -EINVAL;
11903 break;
11904 }
11905
11906 reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
11907 if (tctx->registered_rings[reg.offset]) {
11908 fput(tctx->registered_rings[reg.offset]);
11909 tctx->registered_rings[reg.offset] = NULL;
11910 }
11911 }
11912
11913 return i ? i : ret;
11914}
11915
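/*
 * mmap support: userspace maps the SQ/CQ rings and the SQE array at the
 * fixed IORING_OFF_* offsets; the requested size is validated against the
 * backing compound page before remapping.
 */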
6c5c240e
RP
11916static void *io_uring_validate_mmap_request(struct file *file,
11917 loff_t pgoff, size_t sz)
2b188cc1 11918{
2b188cc1 11919 struct io_ring_ctx *ctx = file->private_data;
6c5c240e 11920 loff_t offset = pgoff << PAGE_SHIFT;
2b188cc1
JA
11921 struct page *page;
11922 void *ptr;
11923
11924 switch (offset) {
11925 case IORING_OFF_SQ_RING:
75b28aff
HV
11926 case IORING_OFF_CQ_RING:
11927 ptr = ctx->rings;
2b188cc1
JA
11928 break;
11929 case IORING_OFF_SQES:
11930 ptr = ctx->sq_sqes;
11931 break;
2b188cc1 11932 default:
6c5c240e 11933 return ERR_PTR(-EINVAL);
2b188cc1
JA
11934 }
11935
11936 page = virt_to_head_page(ptr);
a50b854e 11937 if (sz > page_size(page))
6c5c240e
RP
11938 return ERR_PTR(-EINVAL);
11939
11940 return ptr;
11941}
11942
11943#ifdef CONFIG_MMU
11944
c072481d 11945static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
6c5c240e
RP
11946{
11947 size_t sz = vma->vm_end - vma->vm_start;
11948 unsigned long pfn;
11949 void *ptr;
11950
11951 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
11952 if (IS_ERR(ptr))
11953 return PTR_ERR(ptr);
2b188cc1
JA
11954
11955 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
11956 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
11957}
11958
6c5c240e
RP
11959#else /* !CONFIG_MMU */
11960
11961static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
11962{
11963 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
11964}
11965
11966static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
11967{
11968 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
11969}
11970
11971static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
11972 unsigned long addr, unsigned long len,
11973 unsigned long pgoff, unsigned long flags)
11974{
11975 void *ptr;
11976
11977 ptr = io_uring_validate_mmap_request(file, pgoff, len);
11978 if (IS_ERR(ptr))
11979 return PTR_ERR(ptr);
11980
11981 return (unsigned long) ptr;
11982}
11983
11984#endif /* !CONFIG_MMU */
11985
d9d05217 11986static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
90554200
JA
11987{
11988 DEFINE_WAIT(wait);
11989
11990 do {
11991 if (!io_sqring_full(ctx))
11992 break;
90554200
JA
11993 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
11994
11995 if (!io_sqring_full(ctx))
11996 break;
90554200
JA
11997 schedule();
11998 } while (!signal_pending(current));
11999
12000 finish_wait(&ctx->sqo_sq_wait, &wait);
5199328a 12001 return 0;
90554200
JA
12002}
12003
f81440d3
PB
12004static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
12005{
12006 if (flags & IORING_ENTER_EXT_ARG) {
12007 struct io_uring_getevents_arg arg;
12008
12009 if (argsz != sizeof(arg))
12010 return -EINVAL;
12011 if (copy_from_user(&arg, argp, sizeof(arg)))
12012 return -EFAULT;
12013 }
12014 return 0;
12015}
12016
c73ebb68
HX
12017static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
12018 struct __kernel_timespec __user **ts,
12019 const sigset_t __user **sig)
12020{
12021 struct io_uring_getevents_arg arg;
12022
12023 /*
12024 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
12025 * is just a pointer to the sigset_t.
12026 */
12027 if (!(flags & IORING_ENTER_EXT_ARG)) {
12028 *sig = (const sigset_t __user *) argp;
12029 *ts = NULL;
12030 return 0;
12031 }
12032
12033 /*
12034 * EXT_ARG is set - ensure we agree on the size of it and copy in our
12035 * timespec and sigset_t pointers if good.
12036 */
12037 if (*argsz != sizeof(arg))
12038 return -EINVAL;
12039 if (copy_from_user(&arg, argp, sizeof(arg)))
12040 return -EFAULT;
d2347b96
DY
12041 if (arg.pad)
12042 return -EINVAL;
c73ebb68
HX
12043 *sig = u64_to_user_ptr(arg.sigmask);
12044 *argsz = arg.sigmask_sz;
12045 *ts = u64_to_user_ptr(arg.ts);
12046 return 0;
12047}
12048
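/*
 * Editor's sketch (userspace, not part of this file): a timed completion wait
 * through IORING_ENTER_EXT_ARG, the path parsed by io_get_ext_arg() above.
 * Raw syscall(2) is used instead of liburing, and error handling is trimmed.
 */
#if 0	/* illustrative userspace example */
#include <signal.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

static int wait_cqes_timeout(int ring_fd, unsigned int want, long long nsec)
{
	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = nsec };
	struct io_uring_getevents_arg arg = {
		.sigmask	= 0,		/* keep the signal mask as-is */
		.sigmask_sz	= _NSIG / 8,
		.ts		= (unsigned long)&ts,
	};

	/* argsz carries sizeof(arg) so kernel and app agree on the layout */
	return syscall(__NR_io_uring_enter, ring_fd, 0, want,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
		       &arg, sizeof(arg));
}
#endif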
2b188cc1 12049SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
c73ebb68
HX
12050 u32, min_complete, u32, flags, const void __user *, argp,
12051 size_t, argsz)
2b188cc1
JA
12052{
12053 struct io_ring_ctx *ctx;
2b188cc1 12054 struct fd f;
33f993da 12055 long ret;
2b188cc1 12056
4c6e277c 12057 io_run_task_work();
b41e9852 12058
33f993da 12059 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
e7a6c00d
JA
12060 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
12061 IORING_ENTER_REGISTERED_RING)))
2b188cc1
JA
12062 return -EINVAL;
12063
e7a6c00d
JA
12064 /*
 12065	 * The ring fd has been registered via IORING_REGISTER_RING_FDS, so we
 12066	 * need only dereference our task-private array to find it.
12067 */
12068 if (flags & IORING_ENTER_REGISTERED_RING) {
12069 struct io_uring_task *tctx = current->io_uring;
12070
12071 if (!tctx || fd >= IO_RINGFD_REG_MAX)
12072 return -EINVAL;
12073 fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
12074 f.file = tctx->registered_rings[fd];
4329490a 12075 f.flags = 0;
e7a6c00d
JA
12076 } else {
12077 f = fdget(fd);
e7a6c00d 12078 }
2b188cc1 12079
4329490a
AV
12080 if (unlikely(!f.file))
12081 return -EBADF;
12082
2b188cc1 12083 ret = -EOPNOTSUPP;
33f993da 12084 if (unlikely(f.file->f_op != &io_uring_fops))
2b188cc1
JA
12085 goto out_fput;
12086
12087 ret = -ENXIO;
12088 ctx = f.file->private_data;
33f993da 12089 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
2b188cc1
JA
12090 goto out_fput;
12091
7e84e1c7 12092 ret = -EBADFD;
33f993da 12093 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
7e84e1c7
SG
12094 goto out;
12095
6c271ce2
JA
12096 /*
12097 * For SQ polling, the thread will do all submissions and completions.
12098 * Just return the requested submit count, and wake the thread if
12099 * we were asked to.
12100 */
b2a9eada 12101 ret = 0;
6c271ce2 12102 if (ctx->flags & IORING_SETUP_SQPOLL) {
90f67366 12103 io_cqring_overflow_flush(ctx);
89448c47 12104
21f96522
JA
12105 if (unlikely(ctx->sq_data->thread == NULL)) {
12106 ret = -EOWNERDEAD;
04147488 12107 goto out;
21f96522 12108 }
6c271ce2 12109 if (flags & IORING_ENTER_SQ_WAKEUP)
534ca6d6 12110 wake_up(&ctx->sq_data->wait);
d9d05217
PB
12111 if (flags & IORING_ENTER_SQ_WAIT) {
12112 ret = io_sqpoll_wait_sq(ctx);
12113 if (ret)
12114 goto out;
12115 }
3e813c90 12116 ret = to_submit;
b2a9eada 12117 } else if (to_submit) {
eef51daa 12118 ret = io_uring_add_tctx_node(ctx);
0f212204
JA
12119 if (unlikely(ret))
12120 goto out;
7c504e65 12121
2b188cc1 12122 mutex_lock(&ctx->uring_lock);
3e813c90
DY
12123 ret = io_submit_sqes(ctx, to_submit);
12124 if (ret != to_submit) {
d487b43c 12125 mutex_unlock(&ctx->uring_lock);
7c504e65 12126 goto out;
d487b43c
PB
12127 }
12128 if ((flags & IORING_ENTER_GETEVENTS) && ctx->syscall_iopoll)
12129 goto iopoll_locked;
12130 mutex_unlock(&ctx->uring_lock);
2b188cc1
JA
12131 }
12132 if (flags & IORING_ENTER_GETEVENTS) {
3e813c90 12133 int ret2;
773697b6 12134 if (ctx->syscall_iopoll) {
d487b43c
PB
12135 /*
12136 * We disallow the app entering submit/complete with
12137 * polling, but we still need to lock the ring to
12138 * prevent racing with polled issue that got punted to
12139 * a workqueue.
12140 */
12141 mutex_lock(&ctx->uring_lock);
12142iopoll_locked:
3e813c90
DY
12143 ret2 = io_validate_ext_arg(flags, argp, argsz);
12144 if (likely(!ret2)) {
12145 min_complete = min(min_complete,
12146 ctx->cq_entries);
12147 ret2 = io_iopoll_check(ctx, min_complete);
d487b43c
PB
12148 }
12149 mutex_unlock(&ctx->uring_lock);
def596e9 12150 } else {
f81440d3
PB
12151 const sigset_t __user *sig;
12152 struct __kernel_timespec __user *ts;
12153
3e813c90
DY
12154 ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
12155 if (likely(!ret2)) {
12156 min_complete = min(min_complete,
12157 ctx->cq_entries);
12158 ret2 = io_cqring_wait(ctx, min_complete, sig,
12159 argsz, ts);
12160 }
def596e9 12161 }
c73ebb68 12162
155bc950 12163 if (!ret) {
3e813c90 12164 ret = ret2;
2b188cc1 12165
155bc950
DY
12166 /*
12167 * EBADR indicates that one or more CQE were dropped.
12168 * Once the user has been informed we can clear the bit
12169 * as they are obviously ok with those drops.
12170 */
12171 if (unlikely(ret2 == -EBADR))
12172 clear_bit(IO_CHECK_CQ_DROPPED_BIT,
12173 &ctx->check_cq);
def596e9 12174 }
2b188cc1
JA
12175 }
12176
7c504e65 12177out:
6805b32e 12178 percpu_ref_put(&ctx->refs);
2b188cc1 12179out_fput:
4329490a 12180 fdput(f);
3e813c90 12181 return ret;
2b188cc1
JA
12182}
12183
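/*
 * Editor's sketch (userspace, not part of this file): the submit side of an
 * SQPOLL ring, matching the SQPOLL branch of io_uring_enter() above. With
 * SQPOLL the kernel thread consumes SQEs on its own, so the syscall is only
 * needed to wake an idle thread. "sq_flags" is assumed to point at the word
 * mapped via p->sq_off.flags; new SQEs are assumed to be published already.
 */
#if 0	/* illustrative userspace example */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static long sqpoll_submit(int ring_fd, const unsigned int *sq_flags,
			  unsigned int to_submit)
{
	/* A full barrier is required between the SQ tail store and this load. */
	__sync_synchronize();

	if (!(*(volatile const unsigned int *)sq_flags & IORING_SQ_NEED_WAKEUP))
		return to_submit;	/* thread is awake, no syscall needed */

	return syscall(__NR_io_uring_enter, ring_fd, to_submit, 0,
		       IORING_ENTER_SQ_WAKEUP, NULL, 0);
}
#endif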
bebdb65e 12184#ifdef CONFIG_PROC_FS
c072481d 12185static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
61cf9370 12186 const struct cred *cred)
87ce955b 12187{
87ce955b
JA
12188 struct user_namespace *uns = seq_user_ns(m);
12189 struct group_info *gi;
12190 kernel_cap_t cap;
12191 unsigned __capi;
12192 int g;
12193
12194 seq_printf(m, "%5d\n", id);
12195 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
12196 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
12197 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
12198 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
12199 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
12200 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
12201 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
12202 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
12203 seq_puts(m, "\n\tGroups:\t");
12204 gi = cred->group_info;
12205 for (g = 0; g < gi->ngroups; g++) {
12206 seq_put_decimal_ull(m, g ? " " : "",
12207 from_kgid_munged(uns, gi->gid[g]));
12208 }
12209 seq_puts(m, "\n\tCapEff:\t");
12210 cap = cred->cap_effective;
12211 CAP_FOR_EACH_U32(__capi)
12212 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
12213 seq_putc(m, '\n');
12214 return 0;
12215}
12216
c072481d
PB
12217static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
12218 struct seq_file *m)
87ce955b 12219{
dbbe9c64 12220 struct io_sq_data *sq = NULL;
83f84356
HX
12221 struct io_overflow_cqe *ocqe;
12222 struct io_rings *r = ctx->rings;
12223 unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
83f84356
HX
12224 unsigned int sq_head = READ_ONCE(r->sq.head);
12225 unsigned int sq_tail = READ_ONCE(r->sq.tail);
12226 unsigned int cq_head = READ_ONCE(r->cq.head);
12227 unsigned int cq_tail = READ_ONCE(r->cq.tail);
f9b3dfcc 12228 unsigned int cq_shift = 0;
f75d1183 12229 unsigned int sq_entries, cq_entries;
fad8e0de 12230 bool has_lock;
f9b3dfcc 12231 bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
83f84356
HX
12232 unsigned int i;
12233
f9b3dfcc
SR
12234 if (is_cqe32)
12235 cq_shift = 1;
12236
83f84356
HX
12237 /*
 12238	 * We may get imprecise sqe and cqe info if the ring is actively running,
 12239	 * since we read cached_sq_head and cached_cq_tail without uring_lock and
 12240	 * sq_tail and cq_head are changed by userspace. But that's OK, since this
 12241	 * info is normally only consulted when the ring is stuck.
12242 */
c0235652 12243 seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
f75d1183
JA
12244 seq_printf(m, "SqHead:\t%u\n", sq_head);
12245 seq_printf(m, "SqTail:\t%u\n", sq_tail);
12246 seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
12247 seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
12248 seq_printf(m, "CqHead:\t%u\n", cq_head);
12249 seq_printf(m, "CqTail:\t%u\n", cq_tail);
12250 seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
12251 seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head);
12252 sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
12253 for (i = 0; i < sq_entries; i++) {
12254 unsigned int entry = i + sq_head;
12255 unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
a1957780 12256 struct io_uring_sqe *sqe;
f75d1183
JA
12257
12258 if (sq_idx > sq_mask)
12259 continue;
12260 sqe = &ctx->sq_sqes[sq_idx];
12261 seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n",
12262 sq_idx, sqe->opcode, sqe->fd, sqe->flags,
12263 sqe->user_data);
83f84356 12264 }
f75d1183
JA
12265 seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
12266 cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
12267 for (i = 0; i < cq_entries; i++) {
12268 unsigned int entry = i + cq_head;
f9b3dfcc 12269 struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];
83f84356 12270
f9b3dfcc
SR
12271 if (!is_cqe32) {
12272 seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n",
f75d1183
JA
12273 entry & cq_mask, cqe->user_data, cqe->res,
12274 cqe->flags);
f9b3dfcc
SR
12275 } else {
12276 seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x, "
12277 "extra1:%llu, extra2:%llu\n",
12278 entry & cq_mask, cqe->user_data, cqe->res,
12279 cqe->flags, cqe->big_cqe[0], cqe->big_cqe[1]);
12280 }
83f84356 12281 }
87ce955b 12282
fad8e0de
JA
12283 /*
12284 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
12285 * since fdinfo case grabs it in the opposite direction of normal use
12286 * cases. If we fail to get the lock, we just don't iterate any
12287 * structures that could be going away outside the io_uring mutex.
12288 */
12289 has_lock = mutex_trylock(&ctx->uring_lock);
12290
5f3f26f9 12291 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
dbbe9c64 12292 sq = ctx->sq_data;
5f3f26f9
JA
12293 if (!sq->thread)
12294 sq = NULL;
12295 }
dbbe9c64
JQ
12296
12297 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
12298 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
87ce955b 12299 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
fad8e0de 12300 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
7b29f92d 12301 struct file *f = io_file_from_index(ctx, i);
87ce955b 12302
87ce955b
JA
12303 if (f)
12304 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
12305 else
12306 seq_printf(m, "%5u: <none>\n", i);
12307 }
12308 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
fad8e0de 12309 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
41edf1a5 12310 struct io_mapped_ubuf *buf = ctx->user_bufs[i];
4751f53d 12311 unsigned int len = buf->ubuf_end - buf->ubuf;
87ce955b 12312
4751f53d 12313 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
87ce955b 12314 }
61cf9370
MWO
12315 if (has_lock && !xa_empty(&ctx->personalities)) {
12316 unsigned long index;
12317 const struct cred *cred;
12318
87ce955b 12319 seq_printf(m, "Personalities:\n");
61cf9370
MWO
12320 xa_for_each(&ctx->personalities, index, cred)
12321 io_uring_show_cred(m, index, cred);
87ce955b 12322 }
83f84356
HX
12323 if (has_lock)
12324 mutex_unlock(&ctx->uring_lock);
12325
12326 seq_puts(m, "PollList:\n");
79ebeaee 12327 spin_lock(&ctx->completion_lock);
d7718a9d
JA
12328 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
12329 struct hlist_head *list = &ctx->cancel_hash[i];
12330 struct io_kiocb *req;
12331
12332 hlist_for_each_entry(req, list, hash_node)
12333 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
7f62d40d 12334 task_work_pending(req->task));
d7718a9d 12335 }
83f84356
HX
12336
12337 seq_puts(m, "CqOverflowList:\n");
12338 list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
12339 struct io_uring_cqe *cqe = &ocqe->cqe;
12340
12341 seq_printf(m, " user_data=%llu, res=%d, flags=%x\n",
12342 cqe->user_data, cqe->res, cqe->flags);
12343
12344 }
12345
79ebeaee 12346 spin_unlock(&ctx->completion_lock);
87ce955b
JA
12347}
12348
c072481d 12349static __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
87ce955b
JA
12350{
12351 struct io_ring_ctx *ctx = f->private_data;
12352
12353 if (percpu_ref_tryget(&ctx->refs)) {
12354 __io_uring_show_fdinfo(ctx, m);
12355 percpu_ref_put(&ctx->refs);
12356 }
12357}
bebdb65e 12358#endif
87ce955b 12359
2b188cc1
JA
12360static const struct file_operations io_uring_fops = {
12361 .release = io_uring_release,
12362 .mmap = io_uring_mmap,
6c5c240e
RP
12363#ifndef CONFIG_MMU
12364 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
12365 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
12366#endif
2b188cc1 12367 .poll = io_uring_poll,
bebdb65e 12368#ifdef CONFIG_PROC_FS
87ce955b 12369 .show_fdinfo = io_uring_show_fdinfo,
bebdb65e 12370#endif
2b188cc1
JA
12371};
12372
c072481d
PB
12373static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
12374 struct io_uring_params *p)
2b188cc1 12375{
75b28aff
HV
12376 struct io_rings *rings;
12377 size_t size, sq_array_offset;
2b188cc1 12378
bd740481
JA
12379 /* make sure these are sane, as we already accounted them */
12380 ctx->sq_entries = p->sq_entries;
12381 ctx->cq_entries = p->cq_entries;
12382
baf9cb64 12383 size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
75b28aff
HV
12384 if (size == SIZE_MAX)
12385 return -EOVERFLOW;
12386
12387 rings = io_mem_alloc(size);
12388 if (!rings)
2b188cc1
JA
12389 return -ENOMEM;
12390
75b28aff
HV
12391 ctx->rings = rings;
12392 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
12393 rings->sq_ring_mask = p->sq_entries - 1;
12394 rings->cq_ring_mask = p->cq_entries - 1;
12395 rings->sq_ring_entries = p->sq_entries;
12396 rings->cq_ring_entries = p->cq_entries;
2b188cc1 12397
ebdeb7c0
JA
12398 if (p->flags & IORING_SETUP_SQE128)
12399 size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries);
12400 else
12401 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
eb065d30
JA
12402 if (size == SIZE_MAX) {
12403 io_mem_free(ctx->rings);
12404 ctx->rings = NULL;
2b188cc1 12405 return -EOVERFLOW;
eb065d30 12406 }
2b188cc1
JA
12407
12408 ctx->sq_sqes = io_mem_alloc(size);
eb065d30
JA
12409 if (!ctx->sq_sqes) {
12410 io_mem_free(ctx->rings);
12411 ctx->rings = NULL;
2b188cc1 12412 return -ENOMEM;
eb065d30 12413 }
2b188cc1 12414
2b188cc1
JA
12415 return 0;
12416}
12417
9faadcc8
PB
12418static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
12419{
12420 int ret, fd;
12421
12422 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
12423 if (fd < 0)
12424 return fd;
12425
eef51daa 12426 ret = io_uring_add_tctx_node(ctx);
9faadcc8
PB
12427 if (ret) {
12428 put_unused_fd(fd);
12429 return ret;
12430 }
12431 fd_install(fd, file);
12432 return fd;
12433}
12434
2b188cc1
JA
12435/*
 12436 * Allocate an anonymous fd; this is what constitutes the application-
 12437 * visible backing of an io_uring instance. The application mmaps this
12438 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
12439 * we have to tie this fd to a socket for file garbage collection purposes.
12440 */
9faadcc8 12441static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
2b188cc1
JA
12442{
12443 struct file *file;
9faadcc8 12444#if defined(CONFIG_UNIX)
2b188cc1
JA
12445 int ret;
12446
2b188cc1
JA
12447 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
12448 &ctx->ring_sock);
12449 if (ret)
9faadcc8 12450 return ERR_PTR(ret);
2b188cc1
JA
12451#endif
12452
91a9ab7c
PM
12453 file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
12454 O_RDWR | O_CLOEXEC, NULL);
2b188cc1 12455#if defined(CONFIG_UNIX)
9faadcc8
PB
12456 if (IS_ERR(file)) {
12457 sock_release(ctx->ring_sock);
12458 ctx->ring_sock = NULL;
12459 } else {
12460 ctx->ring_sock->file = file;
0f212204 12461 }
2b188cc1 12462#endif
9faadcc8 12463 return file;
2b188cc1
JA
12464}
12465
c072481d
PB
12466static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
12467 struct io_uring_params __user *params)
2b188cc1 12468{
2b188cc1 12469 struct io_ring_ctx *ctx;
9faadcc8 12470 struct file *file;
2b188cc1
JA
12471 int ret;
12472
8110c1a6 12473 if (!entries)
2b188cc1 12474 return -EINVAL;
8110c1a6
JA
12475 if (entries > IORING_MAX_ENTRIES) {
12476 if (!(p->flags & IORING_SETUP_CLAMP))
12477 return -EINVAL;
12478 entries = IORING_MAX_ENTRIES;
12479 }
2b188cc1
JA
12480
12481 /*
12482 * Use twice as many entries for the CQ ring. It's possible for the
12483 * application to drive a higher depth than the size of the SQ ring,
12484 * since the sqes are only used at submission time. This allows for
33a107f0
JA
12485 * some flexibility in overcommitting a bit. If the application has
12486 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
12487 * of CQ ring entries manually.
2b188cc1
JA
12488 */
12489 p->sq_entries = roundup_pow_of_two(entries);
33a107f0
JA
12490 if (p->flags & IORING_SETUP_CQSIZE) {
12491 /*
12492 * If IORING_SETUP_CQSIZE is set, we do the same roundup
12493 * to a power-of-two, if it isn't already. We do NOT impose
12494 * any cq vs sq ring sizing.
12495 */
eb2667b3 12496 if (!p->cq_entries)
33a107f0 12497 return -EINVAL;
8110c1a6
JA
12498 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
12499 if (!(p->flags & IORING_SETUP_CLAMP))
12500 return -EINVAL;
12501 p->cq_entries = IORING_MAX_CQ_ENTRIES;
12502 }
eb2667b3
JQ
12503 p->cq_entries = roundup_pow_of_two(p->cq_entries);
12504 if (p->cq_entries < p->sq_entries)
12505 return -EINVAL;
33a107f0
JA
12506 } else {
12507 p->cq_entries = 2 * p->sq_entries;
12508 }
2b188cc1 12509
2b188cc1 12510 ctx = io_ring_ctx_alloc(p);
62e398be 12511 if (!ctx)
2b188cc1 12512 return -ENOMEM;
773697b6
PB
12513
12514 /*
12515 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
12516 * space applications don't need to do io completion events
12517 * polling again, they can rely on io_sq_thread to do polling
12518 * work, which can reduce cpu usage and uring_lock contention.
12519 */
12520 if (ctx->flags & IORING_SETUP_IOPOLL &&
12521 !(ctx->flags & IORING_SETUP_SQPOLL))
12522 ctx->syscall_iopoll = 1;
12523
2b188cc1 12524 ctx->compat = in_compat_syscall();
62e398be
JA
12525 if (!capable(CAP_IPC_LOCK))
12526 ctx->user = get_uid(current_user());
2aede0e4 12527
9f010507 12528 /*
e1169f06
JA
12529 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
12530 * COOP_TASKRUN is set, then IPIs are never needed by the app.
9f010507 12531 */
e1169f06
JA
12532 ret = -EINVAL;
12533 if (ctx->flags & IORING_SETUP_SQPOLL) {
12534 /* IPI related flags don't make sense with SQPOLL */
ef060ea9
JA
12535 if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
12536 IORING_SETUP_TASKRUN_FLAG))
e1169f06 12537 goto err;
9f010507 12538 ctx->notify_method = TWA_SIGNAL_NO_IPI;
e1169f06
JA
12539 } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
12540 ctx->notify_method = TWA_SIGNAL_NO_IPI;
12541 } else {
ef060ea9
JA
12542 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
12543 goto err;
9f010507 12544 ctx->notify_method = TWA_SIGNAL;
e1169f06 12545 }
9f010507 12546
2aede0e4
JA
12547 /*
12548 * This is just grabbed for accounting purposes. When a process exits,
12549 * the mm is exited and dropped before the files, hence we need to hang
12550 * on to this mm purely for the purposes of being able to unaccount
12551 * memory (locked/pinned vm). It's not used for anything else.
12552 */
6b7898eb 12553 mmgrab(current->mm);
2aede0e4 12554 ctx->mm_account = current->mm;
6b7898eb 12555
2b188cc1
JA
12556 ret = io_allocate_scq_urings(ctx, p);
12557 if (ret)
12558 goto err;
12559
7e84e1c7 12560 ret = io_sq_offload_create(ctx, p);
2b188cc1
JA
12561 if (ret)
12562 goto err;
eae071c9 12563 /* always set a rsrc node */
47b228ce
PB
12564 ret = io_rsrc_node_switch_start(ctx);
12565 if (ret)
12566 goto err;
eae071c9 12567 io_rsrc_node_switch(ctx, NULL);
2b188cc1 12568
2b188cc1 12569 memset(&p->sq_off, 0, sizeof(p->sq_off));
75b28aff
HV
12570 p->sq_off.head = offsetof(struct io_rings, sq.head);
12571 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
12572 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
12573 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
12574 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
12575 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
12576 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
2b188cc1
JA
12577
12578 memset(&p->cq_off, 0, sizeof(p->cq_off));
75b28aff
HV
12579 p->cq_off.head = offsetof(struct io_rings, cq.head);
12580 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
12581 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
12582 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
12583 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
12584 p->cq_off.cqes = offsetof(struct io_rings, cqes);
0d9b5b3a 12585 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
ac90f249 12586
7f13657d
XW
12587 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
12588 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
5769a351 12589 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
c73ebb68 12590 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
9690557e 12591 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
c4212f3e
JA
12592 IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
12593 IORING_FEAT_LINKED_FILE;
7f13657d
XW
12594
12595 if (copy_to_user(params, p, sizeof(*p))) {
12596 ret = -EFAULT;
12597 goto err;
12598 }
d1719f70 12599
9faadcc8
PB
12600 file = io_uring_get_file(ctx);
12601 if (IS_ERR(file)) {
12602 ret = PTR_ERR(file);
12603 goto err;
12604 }
12605
044c1ab3
JA
12606 /*
12607 * Install ring fd as the very last thing, so we don't risk someone
12608 * having closed it before we finish setup
12609 */
9faadcc8
PB
12610 ret = io_uring_install_fd(ctx, file);
12611 if (ret < 0) {
12612 /* fput will clean it up */
12613 fput(file);
12614 return ret;
12615 }
044c1ab3 12616
c826bd7a 12617 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
2b188cc1
JA
12618 return ret;
12619err:
12620 io_ring_ctx_wait_and_kill(ctx);
12621 return ret;
12622}
12623
12624/*
 12625 * Sets up an io_uring context and returns the fd. The application asks for a
 12626 * ring size; we return the actual sq/cq ring sizes (among other things) in the
 12627 * params structure passed in.
12628 */
12629static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
12630{
12631 struct io_uring_params p;
2b188cc1
JA
12632 int i;
12633
12634 if (copy_from_user(&p, params, sizeof(p)))
12635 return -EFAULT;
12636 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
12637 if (p.resv[i])
12638 return -EINVAL;
12639 }
12640
6c271ce2 12641 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
8110c1a6 12642 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
7e84e1c7 12643 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
e1169f06 12644 IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
ebdeb7c0 12645 IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
76c68fbf 12646 IORING_SETUP_SQE128 | IORING_SETUP_CQE32))
2b188cc1
JA
12647 return -EINVAL;
12648
ef060ea9 12649 return io_uring_create(entries, &p, params);
2b188cc1
JA
12650}
12651
12652SYSCALL_DEFINE2(io_uring_setup, u32, entries,
12653 struct io_uring_params __user *, params)
12654{
12655 return io_uring_setup(entries, params);
12656}
12657
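/*
 * Editor's sketch (userspace, not part of this file): creating a ring with the
 * raw io_uring_setup() syscall, requesting a larger CQ via IORING_SETUP_CQSIZE
 * and letting oversized values be clamped (IORING_SETUP_CLAMP). The rounded-up
 * sizes and feature bits come back in "p". "entries" is assumed non-zero.
 */
#if 0	/* illustrative userspace example */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int create_ring(unsigned int entries, struct io_uring_params *p)
{
	memset(p, 0, sizeof(*p));
	p->flags = IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP;
	p->cq_entries = 4 * entries;	/* must end up >= sq_entries */

	return syscall(__NR_io_uring_setup, entries, p);	/* ring fd or -1 */
}
#endif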
c072481d
PB
12658static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
12659 unsigned nr_args)
66f4af93
JA
12660{
12661 struct io_uring_probe *p;
12662 size_t size;
12663 int i, ret;
12664
12665 size = struct_size(p, ops, nr_args);
12666 if (size == SIZE_MAX)
12667 return -EOVERFLOW;
12668 p = kzalloc(size, GFP_KERNEL);
12669 if (!p)
12670 return -ENOMEM;
12671
12672 ret = -EFAULT;
12673 if (copy_from_user(p, arg, size))
12674 goto out;
12675 ret = -EINVAL;
12676 if (memchr_inv(p, 0, size))
12677 goto out;
12678
12679 p->last_op = IORING_OP_LAST - 1;
12680 if (nr_args > IORING_OP_LAST)
12681 nr_args = IORING_OP_LAST;
12682
12683 for (i = 0; i < nr_args; i++) {
12684 p->ops[i].op = i;
12685 if (!io_op_defs[i].not_supported)
12686 p->ops[i].flags = IO_URING_OP_SUPPORTED;
12687 }
12688 p->ops_len = i;
12689
12690 ret = 0;
12691 if (copy_to_user(arg, p, size))
12692 ret = -EFAULT;
12693out:
12694 kfree(p);
12695 return ret;
12696}
12697
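/*
 * Editor's sketch (userspace, not part of this file): querying opcode support
 * through IORING_REGISTER_PROBE, the handler implemented by io_probe() above.
 * The probe buffer must be zeroed, since non-zero reserved fields are rejected.
 */
#if 0	/* illustrative userspace example */
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int opcode_supported(int ring_fd, unsigned int opcode)
{
	size_t len = sizeof(struct io_uring_probe) +
		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
	struct io_uring_probe *probe = calloc(1, len);
	int ret, ok = 0;

	if (!probe)
		return -1;

	ret = syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
		      probe, IORING_OP_LAST);
	if (!ret && opcode <= probe->last_op)
		ok = probe->ops[opcode].flags & IO_URING_OP_SUPPORTED;

	free(probe);
	return ret ? ret : !!ok;	/* 1 if supported, 0 if not */
}
#endif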
071698e1
JA
12698static int io_register_personality(struct io_ring_ctx *ctx)
12699{
4379bf8b 12700 const struct cred *creds;
61cf9370 12701 u32 id;
1e6fa521 12702 int ret;
071698e1 12703
4379bf8b 12704 creds = get_current_cred();
1e6fa521 12705
61cf9370
MWO
12706 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
12707 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
a30f895a
JA
12708 if (ret < 0) {
12709 put_cred(creds);
12710 return ret;
12711 }
12712 return id;
071698e1
JA
12713}
12714
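/*
 * Editor's sketch (userspace, not part of this file): capturing the caller's
 * credentials as a personality id, as implemented by io_register_personality()
 * above, and attaching them to a single request via sqe->personality.
 */
#if 0	/* illustrative userspace example */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int run_sqe_with_current_creds(int ring_fd, struct io_uring_sqe *sqe)
{
	int id = syscall(__NR_io_uring_register, ring_fd,
			 IORING_REGISTER_PERSONALITY, NULL, 0);

	if (id < 0)
		return id;
	sqe->personality = id;	/* this request now runs with the saved creds */
	return id;		/* drop later with IORING_UNREGISTER_PERSONALITY */
}
#endif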
c072481d
PB
12715static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
12716 void __user *arg, unsigned int nr_args)
21b55dbc
SG
12717{
12718 struct io_uring_restriction *res;
12719 size_t size;
12720 int i, ret;
12721
7e84e1c7
SG
12722 /* Restrictions allowed only if rings started disabled */
12723 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
12724 return -EBADFD;
12725
21b55dbc 12726 /* We allow only a single restrictions registration */
7e84e1c7 12727 if (ctx->restrictions.registered)
21b55dbc
SG
12728 return -EBUSY;
12729
12730 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
12731 return -EINVAL;
12732
12733 size = array_size(nr_args, sizeof(*res));
12734 if (size == SIZE_MAX)
12735 return -EOVERFLOW;
12736
12737 res = memdup_user(arg, size);
12738 if (IS_ERR(res))
12739 return PTR_ERR(res);
12740
12741 ret = 0;
12742
12743 for (i = 0; i < nr_args; i++) {
12744 switch (res[i].opcode) {
12745 case IORING_RESTRICTION_REGISTER_OP:
12746 if (res[i].register_op >= IORING_REGISTER_LAST) {
12747 ret = -EINVAL;
12748 goto out;
12749 }
12750
12751 __set_bit(res[i].register_op,
12752 ctx->restrictions.register_op);
12753 break;
12754 case IORING_RESTRICTION_SQE_OP:
12755 if (res[i].sqe_op >= IORING_OP_LAST) {
12756 ret = -EINVAL;
12757 goto out;
12758 }
12759
12760 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
12761 break;
12762 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
12763 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
12764 break;
12765 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
12766 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
12767 break;
12768 default:
12769 ret = -EINVAL;
12770 goto out;
12771 }
12772 }
12773
12774out:
12775 /* Reset all restrictions if an error happened */
12776 if (ret != 0)
12777 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
12778 else
7e84e1c7 12779 ctx->restrictions.registered = true;
21b55dbc
SG
12780
12781 kfree(res);
12782 return ret;
12783}
12784
7e84e1c7
SG
12785static int io_register_enable_rings(struct io_ring_ctx *ctx)
12786{
12787 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
12788 return -EBADFD;
12789
12790 if (ctx->restrictions.registered)
12791 ctx->restricted = 1;
12792
0298ef96
PB
12793 ctx->flags &= ~IORING_SETUP_R_DISABLED;
12794 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
12795 wake_up(&ctx->sq_data->wait);
7e84e1c7
SG
12796 return 0;
12797}
12798
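/*
 * Editor's sketch (userspace, not part of this file): the intended life cycle
 * of a restricted ring. The ring is assumed to have been created with
 * IORING_SETUP_R_DISABLED; restrictions are registered while it is still
 * disabled, then IORING_REGISTER_ENABLE_RINGS switches it live. Error handling
 * is trimmed.
 */
#if 0	/* illustrative userspace example */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int restrict_to_reads(int ring_fd)
{
	struct io_uring_restriction res[2] = {
		{ .opcode = IORING_RESTRICTION_SQE_OP,
		  .sqe_op = IORING_OP_READ },
		{ .opcode = IORING_RESTRICTION_REGISTER_OP,
		  .register_op = IORING_REGISTER_BUFFERS },
	};
	int ret;

	ret = syscall(__NR_io_uring_register, ring_fd,
		      IORING_REGISTER_RESTRICTIONS, res, 2);
	if (ret)
		return ret;

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_ENABLE_RINGS, NULL, 0);
}
#endif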
fdecb662 12799static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
c3bdad02 12800 struct io_uring_rsrc_update2 *up,
98f0b3b4
PB
12801 unsigned nr_args)
12802{
12803 __u32 tmp;
12804 int err;
12805
12806 if (check_add_overflow(up->offset, nr_args, &tmp))
12807 return -EOVERFLOW;
12808 err = io_rsrc_node_switch_start(ctx);
12809 if (err)
12810 return err;
12811
fdecb662
PB
12812 switch (type) {
12813 case IORING_RSRC_FILE:
98f0b3b4 12814 return __io_sqe_files_update(ctx, up, nr_args);
634d00df
PB
12815 case IORING_RSRC_BUFFER:
12816 return __io_sqe_buffers_update(ctx, up, nr_args);
98f0b3b4
PB
12817 }
12818 return -EINVAL;
12819}
12820
c3bdad02
PB
12821static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
12822 unsigned nr_args)
98f0b3b4 12823{
c3bdad02 12824 struct io_uring_rsrc_update2 up;
98f0b3b4
PB
12825
12826 if (!nr_args)
12827 return -EINVAL;
c3bdad02
PB
12828 memset(&up, 0, sizeof(up));
12829 if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
12830 return -EFAULT;
d8a3ba9c 12831 if (up.resv || up.resv2)
565c5e61 12832 return -EINVAL;
c3bdad02
PB
12833 return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
12834}
12835
12836static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
992da01a 12837 unsigned size, unsigned type)
c3bdad02
PB
12838{
12839 struct io_uring_rsrc_update2 up;
12840
12841 if (size != sizeof(up))
12842 return -EINVAL;
98f0b3b4
PB
12843 if (copy_from_user(&up, arg, sizeof(up)))
12844 return -EFAULT;
d8a3ba9c 12845 if (!up.nr || up.resv || up.resv2)
98f0b3b4 12846 return -EINVAL;
992da01a 12847 return __io_register_rsrc_update(ctx, type, &up, up.nr);
98f0b3b4
PB
12848}
12849
c072481d 12850static __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
992da01a 12851 unsigned int size, unsigned int type)
792e3582
PB
12852{
12853 struct io_uring_rsrc_register rr;
12854
 12855 /* keep it extensible */
12856 if (size != sizeof(rr))
12857 return -EINVAL;
12858
12859 memset(&rr, 0, sizeof(rr));
12860 if (copy_from_user(&rr, arg, size))
12861 return -EFAULT;
a8da73a3
JA
12862 if (!rr.nr || rr.resv2)
12863 return -EINVAL;
12864 if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
792e3582
PB
12865 return -EINVAL;
12866
992da01a 12867 switch (type) {
792e3582 12868 case IORING_RSRC_FILE:
a8da73a3
JA
12869 if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
12870 break;
792e3582
PB
12871 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
12872 rr.nr, u64_to_user_ptr(rr.tags));
634d00df 12873 case IORING_RSRC_BUFFER:
0184f08e 12874 if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
a8da73a3 12875 break;
634d00df
PB
12876 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
12877 rr.nr, u64_to_user_ptr(rr.tags));
792e3582
PB
12878 }
12879 return -EINVAL;
12880}
12881
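/*
 * Editor's sketch (userspace, not part of this file): registering a sparse
 * fixed-file table through IORING_REGISTER_FILES2, as dispatched by
 * io_register_rsrc() above. With IORING_RSRC_REGISTER_SPARSE no fd array is
 * passed; slots are filled later via IORING_REGISTER_FILES_UPDATE2 or
 * direct-open requests. The struct layout is assumed to be this kernel's uapi.
 */
#if 0	/* illustrative userspace example */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int register_sparse_files(int ring_fd, unsigned int nr_slots)
{
	struct io_uring_rsrc_register rr;

	memset(&rr, 0, sizeof(rr));
	rr.nr = nr_slots;
	rr.flags = IORING_RSRC_REGISTER_SPARSE;

	/* nr_args carries the struct size so the ABI stays extendable */
	return syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES2,
		       &rr, sizeof(rr));
}
#endif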
c072481d
PB
12882static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
12883 void __user *arg, unsigned len)
fe76421d
JA
12884{
12885 struct io_uring_task *tctx = current->io_uring;
12886 cpumask_var_t new_mask;
12887 int ret;
12888
12889 if (!tctx || !tctx->io_wq)
12890 return -EINVAL;
12891
12892 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
12893 return -ENOMEM;
12894
12895 cpumask_clear(new_mask);
12896 if (len > cpumask_size())
12897 len = cpumask_size();
12898
0f5e4b83
ES
12899 if (in_compat_syscall()) {
12900 ret = compat_get_bitmap(cpumask_bits(new_mask),
12901 (const compat_ulong_t __user *)arg,
12902 len * 8 /* CHAR_BIT */);
12903 } else {
12904 ret = copy_from_user(new_mask, arg, len);
12905 }
12906
12907 if (ret) {
fe76421d
JA
12908 free_cpumask_var(new_mask);
12909 return -EFAULT;
12910 }
12911
12912 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
12913 free_cpumask_var(new_mask);
12914 return ret;
12915}
12916
c072481d 12917static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
fe76421d
JA
12918{
12919 struct io_uring_task *tctx = current->io_uring;
12920
12921 if (!tctx || !tctx->io_wq)
12922 return -EINVAL;
12923
12924 return io_wq_cpu_affinity(tctx->io_wq, NULL);
12925}
12926
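/*
 * Editor's sketch (userspace, not part of this file): pinning io-wq workers to
 * one CPU, the userspace side of io_register_iowq_aff() above. The kernel
 * copies a raw bitmask of "len" bytes, so a cpu_set_t can be passed directly.
 */
#if 0	/* illustrative userspace example */
#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int pin_iowq_to_cpu(int ring_fd, int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_IOWQ_AFF, &mask, sizeof(mask));
}
#endif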
c072481d
PB
12927static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
12928 void __user *arg)
b22fa62a 12929 __must_hold(&ctx->uring_lock)
2e480058 12930{
b22fa62a 12931 struct io_tctx_node *node;
fa84693b
JA
12932 struct io_uring_task *tctx = NULL;
12933 struct io_sq_data *sqd = NULL;
2e480058
JA
12934 __u32 new_count[2];
12935 int i, ret;
12936
2e480058
JA
12937 if (copy_from_user(new_count, arg, sizeof(new_count)))
12938 return -EFAULT;
12939 for (i = 0; i < ARRAY_SIZE(new_count); i++)
12940 if (new_count[i] > INT_MAX)
12941 return -EINVAL;
12942
fa84693b
JA
12943 if (ctx->flags & IORING_SETUP_SQPOLL) {
12944 sqd = ctx->sq_data;
12945 if (sqd) {
009ad9f0
JA
12946 /*
12947 * Observe the correct sqd->lock -> ctx->uring_lock
12948 * ordering. Fine to drop uring_lock here, we hold
12949 * a ref to the ctx.
12950 */
41d3a6bd 12951 refcount_inc(&sqd->refs);
009ad9f0 12952 mutex_unlock(&ctx->uring_lock);
fa84693b 12953 mutex_lock(&sqd->lock);
009ad9f0 12954 mutex_lock(&ctx->uring_lock);
41d3a6bd
JA
12955 if (sqd->thread)
12956 tctx = sqd->thread->io_uring;
fa84693b
JA
12957 }
12958 } else {
12959 tctx = current->io_uring;
12960 }
12961
e139a1ec 12962 BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
fa84693b 12963
bad119b9
PB
12964 for (i = 0; i < ARRAY_SIZE(new_count); i++)
12965 if (new_count[i])
12966 ctx->iowq_limits[i] = new_count[i];
e139a1ec
PB
12967 ctx->iowq_limits_set = true;
12968
e139a1ec
PB
12969 if (tctx && tctx->io_wq) {
12970 ret = io_wq_max_workers(tctx->io_wq, new_count);
12971 if (ret)
12972 goto err;
12973 } else {
12974 memset(new_count, 0, sizeof(new_count));
12975 }
fa84693b 12976
41d3a6bd 12977 if (sqd) {
fa84693b 12978 mutex_unlock(&sqd->lock);
41d3a6bd
JA
12979 io_put_sq_data(sqd);
12980 }
2e480058
JA
12981
12982 if (copy_to_user(arg, new_count, sizeof(new_count)))
12983 return -EFAULT;
12984
b22fa62a
PB
12985 /* that's it for SQPOLL, only the SQPOLL task creates requests */
12986 if (sqd)
12987 return 0;
12988
12989 /* now propagate the restriction to all registered users */
12990 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
12991 struct io_uring_task *tctx = node->task->io_uring;
12992
12993 if (WARN_ON_ONCE(!tctx->io_wq))
12994 continue;
12995
12996 for (i = 0; i < ARRAY_SIZE(new_count); i++)
12997 new_count[i] = ctx->iowq_limits[i];
12998 /* ignore errors, it always returns zero anyway */
12999 (void)io_wq_max_workers(tctx->io_wq, new_count);
13000 }
2e480058 13001 return 0;
fa84693b 13002err:
41d3a6bd 13003 if (sqd) {
fa84693b 13004 mutex_unlock(&sqd->lock);
41d3a6bd
JA
13005 io_put_sq_data(sqd);
13006 }
fa84693b 13007 return ret;
2e480058
JA
13008}
13009
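/*
 * Editor's sketch (userspace, not part of this file): capping the io-wq worker
 * pools, as handled by io_register_iowq_max_workers() above. counts[0] limits
 * bounded workers, counts[1] unbounded ones; a zero entry leaves that limit
 * untouched, and the previous limits are written back into the array.
 */
#if 0	/* illustrative userspace example */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int cap_iowq_workers(int ring_fd, unsigned int bounded,
			    unsigned int unbounded)
{
	unsigned int counts[2] = { bounded, unbounded };

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);
}
#endif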
c7fb1942
JA
13010static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
13011{
13012 struct io_uring_buf_ring *br;
13013 struct io_uring_buf_reg reg;
13014 struct io_buffer_list *bl;
13015 struct page **pages;
13016 int nr_pages;
13017
13018 if (copy_from_user(&reg, arg, sizeof(reg)))
13019 return -EFAULT;
13020
13021 if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2])
13022 return -EINVAL;
13023 if (!reg.ring_addr)
13024 return -EFAULT;
13025 if (reg.ring_addr & ~PAGE_MASK)
13026 return -EINVAL;
13027 if (!is_power_of_2(reg.ring_entries))
13028 return -EINVAL;
13029
f9437ac0
DY
13030 /* cannot disambiguate full vs empty due to head/tail size */
13031 if (reg.ring_entries >= 65536)
13032 return -EINVAL;
13033
c7fb1942
JA
13034 if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
13035 int ret = io_init_bl_list(ctx);
13036 if (ret)
13037 return ret;
13038 }
13039
13040 bl = io_buffer_get_list(ctx, reg.bgid);
2fcabce2
JA
13041 if (bl) {
13042 /* if mapped buffer ring OR classic exists, don't allow */
13043 if (bl->buf_nr_pages || !list_empty(&bl->buf_list))
13044 return -EEXIST;
13045 } else {
c7fb1942
JA
13046 bl = kzalloc(sizeof(*bl), GFP_KERNEL);
13047 if (!bl)
13048 return -ENOMEM;
13049 }
13050
13051 pages = io_pin_pages(reg.ring_addr,
13052 struct_size(br, bufs, reg.ring_entries),
13053 &nr_pages);
13054 if (IS_ERR(pages)) {
13055 kfree(bl);
13056 return PTR_ERR(pages);
13057 }
13058
13059 br = page_address(pages[0]);
13060 bl->buf_pages = pages;
13061 bl->buf_nr_pages = nr_pages;
13062 bl->nr_entries = reg.ring_entries;
13063 bl->buf_ring = br;
13064 bl->mask = reg.ring_entries - 1;
13065 io_buffer_add_list(ctx, bl, reg.bgid);
13066 return 0;
13067}
13068
13069static int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
13070{
13071 struct io_uring_buf_reg reg;
13072 struct io_buffer_list *bl;
13073
13074 if (copy_from_user(&reg, arg, sizeof(reg)))
13075 return -EFAULT;
13076 if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2])
13077 return -EINVAL;
13078
13079 bl = io_buffer_get_list(ctx, reg.bgid);
13080 if (!bl)
13081 return -ENOENT;
13082 if (!bl->buf_nr_pages)
13083 return -EINVAL;
13084
13085 __io_remove_buffers(ctx, bl, -1U);
13086 if (bl->bgid >= BGID_ARRAY) {
13087 xa_erase(&ctx->io_bl_xa, bl->bgid);
13088 kfree(bl);
13089 }
13090 return 0;
13091}
13092
edafccee
JA
13093static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
13094 void __user *arg, unsigned nr_args)
b19062a5
JA
13095 __releases(ctx->uring_lock)
13096 __acquires(ctx->uring_lock)
edafccee
JA
13097{
13098 int ret;
13099
35fa71a0
JA
13100 /*
 13101	 * We're inside the ring mutex; if the ref is already dying, then
13102 * someone else killed the ctx or is already going through
13103 * io_uring_register().
13104 */
13105 if (percpu_ref_is_dying(&ctx->refs))
13106 return -ENXIO;
13107
75c4021a
PB
13108 if (ctx->restricted) {
13109 if (opcode >= IORING_REGISTER_LAST)
13110 return -EINVAL;
13111 opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
13112 if (!test_bit(opcode, ctx->restrictions.register_op))
13113 return -EACCES;
13114 }
13115
edafccee
JA
13116 switch (opcode) {
13117 case IORING_REGISTER_BUFFERS:
0184f08e
PB
13118 ret = -EFAULT;
13119 if (!arg)
13120 break;
634d00df 13121 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
edafccee
JA
13122 break;
13123 case IORING_UNREGISTER_BUFFERS:
13124 ret = -EINVAL;
13125 if (arg || nr_args)
13126 break;
0a96bbe4 13127 ret = io_sqe_buffers_unregister(ctx);
edafccee 13128 break;
6b06314c 13129 case IORING_REGISTER_FILES:
a8da73a3
JA
13130 ret = -EFAULT;
13131 if (!arg)
13132 break;
792e3582 13133 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
6b06314c
JA
13134 break;
13135 case IORING_UNREGISTER_FILES:
13136 ret = -EINVAL;
13137 if (arg || nr_args)
13138 break;
13139 ret = io_sqe_files_unregister(ctx);
13140 break;
c3a31e60 13141 case IORING_REGISTER_FILES_UPDATE:
c3bdad02 13142 ret = io_register_files_update(ctx, arg, nr_args);
c3a31e60 13143 break;
9b402849
JA
13144 case IORING_REGISTER_EVENTFD:
13145 ret = -EINVAL;
13146 if (nr_args != 1)
13147 break;
c75312dd
UA
13148 ret = io_eventfd_register(ctx, arg, 0);
13149 break;
13150 case IORING_REGISTER_EVENTFD_ASYNC:
13151 ret = -EINVAL;
13152 if (nr_args != 1)
f2842ab5 13153 break;
c75312dd 13154 ret = io_eventfd_register(ctx, arg, 1);
9b402849
JA
13155 break;
13156 case IORING_UNREGISTER_EVENTFD:
13157 ret = -EINVAL;
13158 if (arg || nr_args)
13159 break;
13160 ret = io_eventfd_unregister(ctx);
13161 break;
66f4af93
JA
13162 case IORING_REGISTER_PROBE:
13163 ret = -EINVAL;
13164 if (!arg || nr_args > 256)
13165 break;
13166 ret = io_probe(ctx, arg, nr_args);
13167 break;
071698e1
JA
13168 case IORING_REGISTER_PERSONALITY:
13169 ret = -EINVAL;
13170 if (arg || nr_args)
13171 break;
13172 ret = io_register_personality(ctx);
13173 break;
13174 case IORING_UNREGISTER_PERSONALITY:
13175 ret = -EINVAL;
13176 if (arg)
13177 break;
13178 ret = io_unregister_personality(ctx, nr_args);
13179 break;
7e84e1c7
SG
13180 case IORING_REGISTER_ENABLE_RINGS:
13181 ret = -EINVAL;
13182 if (arg || nr_args)
13183 break;
13184 ret = io_register_enable_rings(ctx);
13185 break;
21b55dbc
SG
13186 case IORING_REGISTER_RESTRICTIONS:
13187 ret = io_register_restrictions(ctx, arg, nr_args);
13188 break;
992da01a
PB
13189 case IORING_REGISTER_FILES2:
13190 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
13191 break;
13192 case IORING_REGISTER_FILES_UPDATE2:
13193 ret = io_register_rsrc_update(ctx, arg, nr_args,
13194 IORING_RSRC_FILE);
13195 break;
13196 case IORING_REGISTER_BUFFERS2:
13197 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
792e3582 13198 break;
992da01a
PB
13199 case IORING_REGISTER_BUFFERS_UPDATE:
13200 ret = io_register_rsrc_update(ctx, arg, nr_args,
13201 IORING_RSRC_BUFFER);
c3bdad02 13202 break;
fe76421d
JA
13203 case IORING_REGISTER_IOWQ_AFF:
13204 ret = -EINVAL;
13205 if (!arg || !nr_args)
13206 break;
13207 ret = io_register_iowq_aff(ctx, arg, nr_args);
13208 break;
13209 case IORING_UNREGISTER_IOWQ_AFF:
13210 ret = -EINVAL;
13211 if (arg || nr_args)
13212 break;
13213 ret = io_unregister_iowq_aff(ctx);
13214 break;
2e480058
JA
13215 case IORING_REGISTER_IOWQ_MAX_WORKERS:
13216 ret = -EINVAL;
13217 if (!arg || nr_args != 2)
13218 break;
13219 ret = io_register_iowq_max_workers(ctx, arg);
13220 break;
e7a6c00d
JA
13221 case IORING_REGISTER_RING_FDS:
13222 ret = io_ringfd_register(ctx, arg, nr_args);
13223 break;
13224 case IORING_UNREGISTER_RING_FDS:
13225 ret = io_ringfd_unregister(ctx, arg, nr_args);
13226 break;
c7fb1942
JA
13227 case IORING_REGISTER_PBUF_RING:
13228 ret = -EINVAL;
13229 if (!arg || nr_args != 1)
13230 break;
13231 ret = io_register_pbuf_ring(ctx, arg);
13232 break;
13233 case IORING_UNREGISTER_PBUF_RING:
13234 ret = -EINVAL;
13235 if (!arg || nr_args != 1)
13236 break;
13237 ret = io_unregister_pbuf_ring(ctx, arg);
13238 break;
edafccee
JA
13239 default:
13240 ret = -EINVAL;
13241 break;
13242 }
13243
edafccee
JA
13244 return ret;
13245}
13246
13247SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
13248 void __user *, arg, unsigned int, nr_args)
13249{
13250 struct io_ring_ctx *ctx;
13251 long ret = -EBADF;
13252 struct fd f;
13253
13254 f = fdget(fd);
13255 if (!f.file)
13256 return -EBADF;
13257
13258 ret = -EOPNOTSUPP;
13259 if (f.file->f_op != &io_uring_fops)
13260 goto out_fput;
13261
13262 ctx = f.file->private_data;
13263
b6c23dd5
PB
13264 io_run_task_work();
13265
edafccee
JA
13266 mutex_lock(&ctx->uring_lock);
13267 ret = __io_uring_register(ctx, opcode, arg, nr_args);
13268 mutex_unlock(&ctx->uring_lock);
2757be22 13269 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
edafccee
JA
13270out_fput:
13271 fdput(f);
13272 return ret;
13273}
13274
2b188cc1
JA
13275static int __init io_uring_init(void)
13276{
d7f62e82
SM
13277#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
13278 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
13279 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
13280} while (0)
13281
13282#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
13283 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
13284 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
13285 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
13286 BUILD_BUG_SQE_ELEM(1, __u8, flags);
13287 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
13288 BUILD_BUG_SQE_ELEM(4, __s32, fd);
13289 BUILD_BUG_SQE_ELEM(8, __u64, off);
13290 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
13291 BUILD_BUG_SQE_ELEM(16, __u64, addr);
7d67af2c 13292 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
d7f62e82
SM
13293 BUILD_BUG_SQE_ELEM(24, __u32, len);
13294 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
13295 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
13296 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
13297 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
5769a351
JX
13298 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
13299 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
d7f62e82
SM
13300 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
13301 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
13302 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
13303 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
13304 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
13305 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
13306 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
13307 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
7d67af2c 13308 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
d7f62e82
SM
13309 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
13310 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
16340eab 13311 BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
d7f62e82 13312 BUILD_BUG_SQE_ELEM(42, __u16, personality);
7d67af2c 13313 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
b9445598 13314 BUILD_BUG_SQE_ELEM(44, __u32, file_index);
e9621e2b 13315 BUILD_BUG_SQE_ELEM(48, __u64, addr3);
d7f62e82 13316
b0d658ec
PB
13317 BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
13318 sizeof(struct io_uring_rsrc_update));
13319 BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
13320 sizeof(struct io_uring_rsrc_update2));
90499ad0
PB
13321
13322 /* ->buf_index is u16 */
13323 BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
9cfc7e94 13324 BUILD_BUG_ON(BGID_ARRAY * sizeof(struct io_buffer_list) > PAGE_SIZE);
c7fb1942
JA
13325 BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
13326 BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
13327 offsetof(struct io_uring_buf_ring, tail));
90499ad0 13328
b0d658ec
PB
13329 /* should fit into one byte */
13330 BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
68fe256a
PB
13331 BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
13332 BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
b0d658ec 13333
d3656344 13334 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
32c2d33e 13335 BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
16340eab 13336
3a4b89a2
JA
13337 BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));
13338
ee692a21
JA
13339 BUILD_BUG_ON(sizeof(struct io_uring_cmd) > 64);
13340
91f245d5
JA
13341 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
13342 SLAB_ACCOUNT);
2b188cc1
JA
13343 return 0;
13344};
13345__initcall(io_uring_init);