io_uring: lazy get task
fs/io_uring.c [linux-2.6-block.git]

// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
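
/*
 * To illustrate the ordering rules above, a minimal userspace completion
 * reap might look like the following. This is only a sketch, not part of
 * the kernel build; it assumes cq_head/cq_tail/cq_ring_mask pointers and
 * the cqes array were located via the mmap'ed ring (as liburing does),
 * that kernel-style barrier macros are available in userspace, and that
 * handle_cqe() is a hypothetical consumer:
 *
 *	unsigned head = *cq_head;
 *	unsigned tail = smp_load_acquire(cq_tail);	// pairs with kernel tail store
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_ring_mask];
 *
 *		handle_cqe(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);	// pairs with io_get_cqring()
 */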

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
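
/*
 * A fixed file index is thus resolved in two steps, e.g. (sketch only;
 * the actual lookup helper lives further down in this file):
 *
 *	struct fixed_file_table *table =
 *		&ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
 *	struct file *file = table->files[i & IORING_FILE_TABLE_MASK];
 */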

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
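
/*
 * A userspace sketch of mapping this structure (error handling omitted;
 * 'p' is the io_uring_params filled in by io_uring_setup(2), and the
 * field pointers are then derived from the published offsets):
 *
 *	size_t sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	void *sq_ring = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, ring_fd,
 *			     IORING_OFF_SQ_RING);
 *	unsigned *sq_tail = sq_ring + p.sq_off.tail;
 */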

struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct bio_vec	*bvec;
	unsigned int	nr_bvecs;
};

struct fixed_file_table {
	struct file		**files;
};

struct fixed_file_ref_node {
	struct percpu_ref	refs;
	struct list_head	node;
	struct list_head	file_list;
	struct fixed_file_data	*file_data;
	struct llist_node	llist;
};

struct fixed_file_data {
	struct fixed_file_table	*table;
	struct io_ring_ctx	*ctx;

	struct percpu_ref	*cur_refs;
	struct percpu_ref	refs;
	struct completion	done;
	struct list_head	ref_list;
	spinlock_t		lock;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__s32 len;
	__u16 bid;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		account_mem: 1;
		unsigned int		cq_overflow_flushed: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
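		/*
		 * E.g. a userspace submitter (sketch; sq_array, sq_tail and
		 * sq_ring_mask are hypothetical pointers into the mmap'ed
		 * ring) publishes a prepared sqe by index and then releases
		 * the new tail:
		 *
		 *	sq_array[tail & *sq_ring_mask] = sqe_index;
		 *	smp_store_release(sq_tail, tail + 1);
		 */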
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		atomic_t		cached_cq_overflow;
		unsigned long		sq_check_overflow;

		struct list_head	defer_list;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;

		wait_queue_head_t	inflight_wait;
		struct io_uring_sqe	*sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct io_rings	*rings;

	/* IO offload */
	struct io_wq		*io_wq;
	struct task_struct	*sqo_thread;	/* if using sq thread polling */
	struct mm_struct	*sqo_mm;
	wait_queue_head_t	sqo_wait;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_file_data	*file_data;
	unsigned		nr_user_files;
	int			ring_fd;
	struct file		*ring_file;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	const struct cred	*creds;

	struct completion	ref_comp;
	struct completion	sq_thread_comp;

	/* if all else fails... */
	struct io_kiocb		*fallback_req;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	struct idr		io_buffer_idr;

	struct idr		personality_idr;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		unsigned		cq_mask;
		atomic_t		cq_timeouts;
		unsigned long		cq_check_overflow;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->poll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	poll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_file;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;

	struct delayed_work		file_put_work;
	struct llist_head		file_put_llist;

	struct work_struct		exit_work;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	union {
		struct wait_queue_head	*head;
		u64			addr;
	};
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_close {
	struct file			*file;
	struct file			*put_file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u64				addr;
	int				flags;
	u32				off;
	u32				target_seq;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct user_msghdr __user *msg;
		void __user		*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_files_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__s32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	struct iovec			*iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	struct iovec			*iov;
	ssize_t				nr_segs;
	ssize_t				size;
};

struct io_async_ctx {
	union {
		struct io_async_rw	rw;
		struct io_async_msghdr	msg;
		struct io_async_connect	connect;
		struct io_timeout_data	timeout;
	};
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	REQ_F_LINK_HEAD_BIT,
	REQ_F_LINK_NEXT_BIT,
	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_MUST_PUNT_BIT,
	REQ_F_TIMEOUT_NOSEQ_BIT,
	REQ_F_COMP_LOCKED_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_OVERFLOW_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_NO_FILE_TABLE_BIT,
	REQ_F_QUEUE_TIMEOUT_BIT,
	REQ_F_WORK_INITIALIZED_BIT,
	REQ_F_TASK_PINNED_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* head of a link */
	REQ_F_LINK_HEAD		= BIT(REQ_F_LINK_HEAD_BIT),
	/* already grabbed next link */
	REQ_F_LINK_NEXT		= BIT(REQ_F_LINK_NEXT_BIT),
	/* fail rest of links */
	REQ_F_FAIL_LINK		= BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* timeout request */
	REQ_F_TIMEOUT		= BIT(REQ_F_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* must be punted even for NONBLOCK */
	REQ_F_MUST_PUNT		= BIT(REQ_F_MUST_PUNT_BIT),
	/* no timeout sequence */
	REQ_F_TIMEOUT_NOSEQ	= BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
	/* completion under lock */
	REQ_F_COMP_LOCKED	= BIT(REQ_F_COMP_LOCKED_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* in overflow list */
	REQ_F_OVERFLOW		= BIT(REQ_F_OVERFLOW_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* doesn't need file table for this request */
	REQ_F_NO_FILE_TABLE	= BIT(REQ_F_NO_FILE_TABLE_BIT),
	/* needs to queue linked timeout */
	REQ_F_QUEUE_TIMEOUT	= BIT(REQ_F_QUEUE_TIMEOUT_BIT),
	/* io_wq_work is initialized */
	REQ_F_WORK_INITIALIZED	= BIT(REQ_F_WORK_INITIALIZED_BIT),
	/* req->task is refcounted */
	REQ_F_TASK_PINNED	= BIT(REQ_F_TASK_PINNED_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_wq_work	work;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_files_update	files_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
	};

	struct io_async_ctx		*io;
	int				cflags;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;

	struct io_ring_ctx	*ctx;
	struct list_head	list;
	unsigned int		flags;
	refcount_t		refs;
	struct task_struct	*task;
	unsigned long		fsize;
	u64			user_data;
	u32			result;
	u32			sequence;

	struct list_head	link_list;

	struct list_head	inflight_entry;

	struct percpu_ref	*fixed_file_refs;

	union {
		/*
		 * Only commands that never go async can use the below fields,
		 * obviously. Right now only IORING_OP_POLL_ADD uses them, and
		 * async armed poll handlers for regular commands. The latter
		 * restore the work, if needed.
		 */
		struct {
			struct callback_head	task_work;
			struct hlist_node	hash_node;
			struct async_poll	*apoll;
		};
		struct io_wq_work	work;
	};
};

#define IO_PLUG_THRESHOLD		2
#define IO_IOPOLL_BATCH			8

struct io_submit_state {
	struct blk_plug		plug;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_IOPOLL_BATCH];
	unsigned int		free_reqs;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		has_refs;
	unsigned int		used_refs;
	unsigned int		ios_left;
};

struct io_op_def {
	/* needs req->io allocated for deferral/async */
	unsigned		async_ctx : 1;
	/* needs current->mm setup, does mm access */
	unsigned		needs_mm : 1;
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* don't fail if file grab fails */
	unsigned		needs_file_no_error : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* needs file table */
	unsigned		file_table : 1;
	/* needs ->fs */
	unsigned		needs_fs : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_WRITEV] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.needs_fs		= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECVMSG] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.needs_fs		= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_TIMEOUT] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
	},
	[IORING_OP_TIMEOUT_REMOVE] = {},
	[IORING_OP_ACCEPT] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.file_table		= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
	},
	[IORING_OP_CONNECT] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {
		.file_table		= 1,
		.needs_fs		= 1,
	},
	[IORING_OP_CLOSE] = {
		.needs_file		= 1,
		.needs_file_no_error	= 1,
		.file_table		= 1,
	},
	[IORING_OP_FILES_UPDATE] = {
		.needs_mm		= 1,
		.file_table		= 1,
	},
	[IORING_OP_STATX] = {
		.needs_mm		= 1,
		.needs_fs		= 1,
		.file_table		= 1,
	},
	[IORING_OP_READ] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_WRITE] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {
		.needs_mm		= 1,
	},
	[IORING_OP_SEND] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
		.file_table		= 1,
		.needs_fs		= 1,
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
		.file_table		= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
};

static void io_wq_submit_work(struct io_wq_work **workptr);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_files_update *ip,
				 unsigned nr_args);
static int io_grab_files(struct io_kiocb *req);
static void io_cleanup_req(struct io_kiocb *req);
static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
		       int fd, struct file **out_file, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req,
			   const struct io_uring_sqe *sqe);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static void io_get_req_task(struct io_kiocb *req)
{
	if (req->flags & REQ_F_TASK_PINNED)
		return;
	get_task_struct(req->task);
	req->flags |= REQ_F_TASK_PINNED;
}

/* not idempotent -- it doesn't clear REQ_F_TASK_PINNED */
static void __io_put_req_task(struct io_kiocb *req)
{
	if (req->flags & REQ_F_TASK_PINNED)
		put_task_struct(req->task);
}

static void io_file_put_work(struct work_struct *work);

/*
 * Note: must call io_req_init_async() for the first time you
 * touch any members of io_wq_work.
 */
static inline void io_req_init_async(struct io_kiocb *req)
{
	if (req->flags & REQ_F_WORK_INITIALIZED)
		return;

	memset(&req->work, 0, sizeof(req->work));
	req->flags |= REQ_F_WORK_INITIALIZED;
}
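
/*
 * E.g. io_prep_async_work() below follows this rule: it calls
 * io_req_init_async(req) before io_req_work_grab_env() touches
 * req->work.mm/creds/fs.
 */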

static inline bool io_async_submit(struct io_ring_ctx *ctx)
{
	return ctx->flags & IORING_SETUP_SQPOLL;
}

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
	if (!ctx->fallback_req)
		goto err;

	/*
	 * Use 5 bits less than the max cq entries, which should give us
	 * around 32 entries per hash list if totally full and uniformly
	 * spread.
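	 *
	 * For example, a ring created with 4096 entries gets cq_entries =
	 * 8192 by default (no IORING_SETUP_CQSIZE): ilog2(8192) = 13, so
	 * hash_bits = 8, giving 256 buckets and 32 entries per bucket when
	 * completely full.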
	 */
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
					GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_wait);
	init_waitqueue_head(&ctx->cq_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->ref_comp);
	init_completion(&ctx->sq_thread_comp);
	idr_init(&ctx->io_buffer_idr);
	idr_init(&ctx->personality_idr);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->poll_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	init_waitqueue_head(&ctx->inflight_wait);
	spin_lock_init(&ctx->inflight_lock);
	INIT_LIST_HEAD(&ctx->inflight_list);
	INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
	init_llist_head(&ctx->file_put_llist);
	return ctx;
err:
	if (ctx->fallback_req)
		kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}

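/*
 * Drain semantics (sketch): req->sequence is snapshotted at submission
 * time, and a drained request stays deferred until the count of CQEs
 * posted plus completions dropped to overflow catches up with it.
 */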
static inline bool __req_need_defer(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	return req->sequence != ctx->cached_cq_tail
				+ atomic_read(&ctx->cached_cq_overflow);
}

static inline bool req_need_defer(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN))
		return __req_need_defer(req);

	return false;
}

static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* order cqe stores with ring update */
	smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);

	if (wq_has_sleeper(&ctx->cq_wait)) {
		wake_up_interruptible(&ctx->cq_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

static inline void io_req_work_grab_env(struct io_kiocb *req,
					const struct io_op_def *def)
{
	if (!req->work.mm && def->needs_mm) {
		mmgrab(current->mm);
		req->work.mm = current->mm;
	}
	if (!req->work.creds)
		req->work.creds = get_current_cred();
	if (!req->work.fs && def->needs_fs) {
		spin_lock(&current->fs->lock);
		if (!current->fs->in_exec) {
			req->work.fs = current->fs;
			req->work.fs->users++;
		} else {
			req->work.flags |= IO_WQ_WORK_CANCEL;
		}
		spin_unlock(&current->fs->lock);
	}
	if (!req->work.task_pid)
		req->work.task_pid = task_pid_vnr(current);
}

static inline void io_req_work_drop_env(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_WORK_INITIALIZED))
		return;

	if (req->work.mm) {
		mmdrop(req->work.mm);
		req->work.mm = NULL;
	}
	if (req->work.creds) {
		put_cred(req->work.creds);
		req->work.creds = NULL;
	}
	if (req->work.fs) {
		struct fs_struct *fs = req->work.fs;

		spin_lock(&req->work.fs->lock);
		if (--fs->users)
			fs = NULL;
		spin_unlock(&req->work.fs->lock);
		if (fs)
			free_fs_struct(fs);
	}
}

static inline void io_prep_async_work(struct io_kiocb *req,
				      struct io_kiocb **link)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];

	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file)
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	io_req_init_async(req);
	io_req_work_grab_env(req, def);

	*link = io_prep_linked_timeout(req);
}

static inline void io_queue_async_work(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link;

	io_prep_async_work(req, &link);

	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
					&req->work, req->flags);
	io_wq_enqueue(ctx->io_wq, &req->work);

	if (link)
		io_queue_linked_timeout(link);
}

static void io_kill_timeout(struct io_kiocb *req)
{
	int ret;

	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
	if (ret != -1) {
		atomic_inc(&req->ctx->cq_timeouts);
		list_del_init(&req->list);
		req->flags |= REQ_F_COMP_LOCKED;
		io_cqring_fill_event(req, 0);
		io_put_req(req);
	}
}

static void io_kill_timeouts(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req, *tmp;

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
		io_kill_timeout(req);
	spin_unlock_irq(&ctx->completion_lock);
}

static void __io_queue_deferred(struct io_ring_ctx *ctx)
{
	do {
		struct io_kiocb *req = list_first_entry(&ctx->defer_list,
							struct io_kiocb, list);

		if (req_need_defer(req))
			break;
		list_del_init(&req->list);
		io_queue_async_work(req);
	} while (!list_empty(&ctx->defer_list));
}

static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	while (!list_empty(&ctx->timeout_list)) {
		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
							struct io_kiocb, list);

		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
			break;
		if (req->timeout.target_seq != ctx->cached_cq_tail
					- atomic_read(&ctx->cq_timeouts))
			break;

		list_del_init(&req->list);
		io_kill_timeout(req);
	}
}

static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	io_flush_timeouts(ctx);
	__io_commit_cqring(ctx);

	if (unlikely(!list_empty(&ctx->defer_list)))
		__io_queue_deferred(ctx);
}

static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail;

	tail = ctx->cached_cq_tail;
	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
		return NULL;

	ctx->cached_cq_tail++;
	return &rings->cqes[tail & ctx->cq_mask];
}

static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
	if (!ctx->cq_ev_fd)
		return false;
	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		return false;
	if (!ctx->eventfd_async)
		return true;
	return io_wq_current_is_worker();
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	if (waitqueue_active(&ctx->sqo_wait))
		wake_up(&ctx->sqo_wait);
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
}

/* Returns true if there are no backlogged entries after the flush */
static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
	struct io_rings *rings = ctx->rings;
	struct io_uring_cqe *cqe;
	struct io_kiocb *req;
	unsigned long flags;
	LIST_HEAD(list);

	if (!force) {
		if (list_empty_careful(&ctx->cq_overflow_list))
			return true;
		if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
		    rings->cq_ring_entries))
			return false;
	}

	spin_lock_irqsave(&ctx->completion_lock, flags);

	/* if force is set, the ring is going away. always drop after that */
	if (force)
		ctx->cq_overflow_flushed = 1;

	cqe = NULL;
	while (!list_empty(&ctx->cq_overflow_list)) {
		cqe = io_get_cqring(ctx);
		if (!cqe && !force)
			break;

		req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
						list);
		list_move(&req->list, &list);
		req->flags &= ~REQ_F_OVERFLOW;
		if (cqe) {
			WRITE_ONCE(cqe->user_data, req->user_data);
			WRITE_ONCE(cqe->res, req->result);
			WRITE_ONCE(cqe->flags, req->cflags);
		} else {
			WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
		}
	}

	io_commit_cqring(ctx);
	if (cqe) {
		clear_bit(0, &ctx->sq_check_overflow);
		clear_bit(0, &ctx->cq_check_overflow);
	}
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);

	while (!list_empty(&list)) {
		req = list_first_entry(&list, struct io_kiocb, list);
		list_del(&req->list);
		io_put_req(req);
	}

	return cqe != NULL;
}

static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, req->user_data, res);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (likely(cqe)) {
		WRITE_ONCE(cqe->user_data, req->user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
	} else if (ctx->cq_overflow_flushed) {
		WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
	} else {
		if (list_empty(&ctx->cq_overflow_list)) {
			set_bit(0, &ctx->sq_check_overflow);
			set_bit(0, &ctx->cq_check_overflow);
		}
		req->flags |= REQ_F_OVERFLOW;
		refcount_inc(&req->refs);
		req->result = res;
		req->cflags = cflags;
		list_add_tail(&req->list, &ctx->cq_overflow_list);
	}
}

static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
	__io_cqring_fill_event(req, res, 0);
}

static void __io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	__io_cqring_fill_event(req, res, cflags);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
}

static void io_cqring_add_event(struct io_kiocb *req, long res)
{
	__io_cqring_add_event(req, res, 0);
}

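/*
 * ctx->fallback_req is kmem_cache-allocated and therefore at least
 * word-aligned, so bit 0 of the pointer is free to serve as the "in use"
 * lock taken by test_and_set_bit_lock() below and released with
 * clear_bit_unlock() in __io_free_req().
 */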
static inline bool io_is_fallback_req(struct io_kiocb *req)
{
	return req == (struct io_kiocb *)
		((unsigned long) req->ctx->fallback_req & ~1UL);
}

static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = ctx->fallback_req;
	if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
		return req;

	return NULL;
}

static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
				     struct io_submit_state *state)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct io_kiocb *req;

	if (!state) {
		req = kmem_cache_alloc(req_cachep, gfp);
		if (unlikely(!req))
			goto fallback;
	} else if (!state->free_reqs) {
		size_t sz;
		int ret;

		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);

		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		if (unlikely(ret <= 0)) {
			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!state->reqs[0])
				goto fallback;
			ret = 1;
		}
		state->free_reqs = ret - 1;
		req = state->reqs[ret - 1];
	} else {
		state->free_reqs--;
		req = state->reqs[state->free_reqs];
	}

	return req;
fallback:
	return io_get_fallback_req(ctx);
}

static inline void io_put_file(struct io_kiocb *req, struct file *file,
			       bool fixed)
{
	if (fixed)
		percpu_ref_put(req->fixed_file_refs);
	else
		fput(file);
}

static void __io_req_aux_free(struct io_kiocb *req)
{
	if (req->flags & REQ_F_NEED_CLEANUP)
		io_cleanup_req(req);

	kfree(req->io);
	if (req->file)
		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
	__io_put_req_task(req);
	io_req_work_drop_env(req);
}

static void __io_free_req(struct io_kiocb *req)
{
	__io_req_aux_free(req);

	if (req->flags & REQ_F_INFLIGHT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		list_del(&req->inflight_entry);
		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
	}

	percpu_ref_put(&req->ctx->refs);
	if (likely(!io_is_fallback_req(req)))
		kmem_cache_free(req_cachep, req);
	else
		clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req);
}

struct req_batch {
	void *reqs[IO_IOPOLL_BATCH];
	int to_free;
	int need_iter;
};

static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
{
	if (!rb->to_free)
		return;
	if (rb->need_iter) {
		int i, inflight = 0;
		unsigned long flags;

		for (i = 0; i < rb->to_free; i++) {
			struct io_kiocb *req = rb->reqs[i];

			if (req->flags & REQ_F_INFLIGHT)
				inflight++;
			__io_req_aux_free(req);
		}
		if (!inflight)
			goto do_free;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		for (i = 0; i < rb->to_free; i++) {
			struct io_kiocb *req = rb->reqs[i];

			if (req->flags & REQ_F_INFLIGHT) {
				list_del(&req->inflight_entry);
				if (!--inflight)
					break;
			}
		}
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);

		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
	}
do_free:
	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
	percpu_ref_put_many(&ctx->refs, rb->to_free);
	rb->to_free = rb->need_iter = 0;
}

static bool io_link_cancel_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
	if (ret != -1) {
		io_cqring_fill_event(req, -ECANCELED);
		io_commit_cqring(ctx);
		req->flags &= ~REQ_F_LINK_HEAD;
		io_put_req(req);
		return true;
	}

	return false;
}

static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	struct io_ring_ctx *ctx = req->ctx;
	bool wake_ev = false;

	/* Already got next link */
	if (req->flags & REQ_F_LINK_NEXT)
		return;

	/*
	 * The list should never be empty when we are called here. But it
	 * could potentially happen if the chain is messed up, so check to
	 * be on the safe side.
	 */
	while (!list_empty(&req->link_list)) {
		struct io_kiocb *nxt = list_first_entry(&req->link_list,
						struct io_kiocb, link_list);

		if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) &&
			     (nxt->flags & REQ_F_TIMEOUT))) {
			list_del_init(&nxt->link_list);
			wake_ev |= io_link_cancel_timeout(nxt);
			req->flags &= ~REQ_F_LINK_TIMEOUT;
			continue;
		}

		list_del_init(&req->link_list);
		if (!list_empty(&nxt->link_list))
			nxt->flags |= REQ_F_LINK_HEAD;
		*nxtptr = nxt;
		break;
	}

	req->flags |= REQ_F_LINK_NEXT;
	if (wake_ev)
		io_cqring_ev_posted(ctx);
}

/*
 * Called if REQ_F_LINK_HEAD is set, and we fail the head request
 */
static void io_fail_links(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);

	while (!list_empty(&req->link_list)) {
		struct io_kiocb *link = list_first_entry(&req->link_list,
						struct io_kiocb, link_list);

		list_del_init(&link->link_list);
		trace_io_uring_fail_link(req, link);

		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
		    link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_link_cancel_timeout(link);
		} else {
			io_cqring_fill_event(link, -ECANCELED);
			__io_double_put_req(link);
		}
		req->flags &= ~REQ_F_LINK_TIMEOUT;
	}

	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);
}

static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
{
	if (likely(!(req->flags & REQ_F_LINK_HEAD)))
		return;

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (req->flags & REQ_F_FAIL_LINK) {
		io_fail_links(req);
	} else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
			REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		/*
		 * If this is a timeout link, we could be racing with the
		 * timeout timer. Grab the completion lock for this case to
		 * protect against that.
		 */
		spin_lock_irqsave(&ctx->completion_lock, flags);
		io_req_link_next(req, nxt);
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
	} else {
		io_req_link_next(req, nxt);
	}
}

static void io_free_req(struct io_kiocb *req)
{
	struct io_kiocb *nxt = NULL;

	io_req_find_next(req, &nxt);
	__io_free_req(req);

	if (nxt)
		io_queue_async_work(nxt);
}

static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
{
	struct io_kiocb *link;
	const struct io_op_def *def = &io_op_defs[nxt->opcode];

	if ((nxt->flags & REQ_F_ISREG) && def->hash_reg_file)
		io_wq_hash_work(&nxt->work, file_inode(nxt->file));

	*workptr = &nxt->work;
	link = io_prep_linked_timeout(nxt);
	if (link)
		nxt->flags |= REQ_F_QUEUE_TIMEOUT;
}

/*
 * Drop reference to request, return next in chain (if there is one) if this
 * was the last reference to this request.
 */
__attribute__((nonnull))
static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	if (refcount_dec_and_test(&req->refs)) {
		io_req_find_next(req, nxtptr);
		__io_free_req(req);
	}
}

static void io_put_req(struct io_kiocb *req)
{
	if (refcount_dec_and_test(&req->refs))
		io_free_req(req);
}

static void io_steal_work(struct io_kiocb *req,
			  struct io_wq_work **workptr)
{
	/*
	 * It's in an io-wq worker, so there always should be at least
	 * one reference, which will be dropped in io_put_work() just
	 * after the current handler returns.
	 *
	 * It also means that if the counter dropped to 1, then there are
	 * no asynchronous users left, so it's safe to steal the next work.
	 */
	if (refcount_read(&req->refs) == 1) {
		struct io_kiocb *nxt = NULL;

		io_req_find_next(req, &nxt);
		if (nxt)
			io_wq_assign_next(workptr, nxt);
	}
}

/*
 * Must only be used if we don't need to care about links, usually from
 * within the completion handling itself.
 */
static void __io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		__io_free_req(req);
}

static void io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		io_free_req(req);
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
{
	struct io_rings *rings = ctx->rings;

	if (test_bit(0, &ctx->cq_check_overflow)) {
		/*
		 * noflush == true is from the waitqueue handler, just ensure
		 * we wake up the task, and the next invocation will flush the
		 * entries. We cannot safely do it from here.
		 */
		if (noflush && !list_empty(&ctx->cq_overflow_list))
			return -1U;

		io_cqring_overflow_flush(ctx, false);
	}

	/* See comment at the top of this file */
	smp_rmb();
	return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
{
	if ((req->flags & REQ_F_LINK_HEAD) || io_is_fallback_req(req))
		return false;

	if (req->file || req->io)
		rb->need_iter++;

	rb->reqs[rb->to_free++] = req;
	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
		io_free_req_many(req->ctx, rb);
	return true;
}

static int io_put_kbuf(struct io_kiocb *req)
{
	struct io_buffer *kbuf;
	int cflags;

	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
	cflags |= IORING_CQE_F_BUFFER;
	req->rw.addr = 0;
	kfree(kbuf);
	return cflags;
}

/*
 * Find and free completed poll iocbs
 */
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       struct list_head *done)
{
	struct req_batch rb;
	struct io_kiocb *req;

	rb.to_free = rb.need_iter = 0;
	while (!list_empty(done)) {
		int cflags = 0;

		req = list_first_entry(done, struct io_kiocb, list);
		list_del(&req->list);

		if (req->flags & REQ_F_BUFFER_SELECTED)
			cflags = io_put_kbuf(req);

		__io_cqring_fill_event(req, req->result, cflags);
		(*nr_events)++;

		if (refcount_dec_and_test(&req->refs) &&
		    !io_req_multi_free(&rb, req))
			io_free_req(req);
	}

	io_commit_cqring(ctx);
	if (ctx->flags & IORING_SETUP_SQPOLL)
		io_cqring_ev_posted(ctx);
	io_free_req_many(ctx, &rb);
}

static void io_iopoll_queue(struct list_head *again)
{
	struct io_kiocb *req;

	do {
		req = list_first_entry(again, struct io_kiocb, list);
		list_del(&req->list);
		refcount_inc(&req->refs);
		io_queue_async_work(req);
	} while (!list_empty(again));
}

static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
			long min)
{
	struct io_kiocb *req, *tmp;
	LIST_HEAD(done);
	LIST_HEAD(again);
	bool spin;
	int ret;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list, and we're under the requested amount.
	 */
	spin = !ctx->poll_multi_file && *nr_events < min;

	ret = 0;
	list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
		struct kiocb *kiocb = &req->rw.kiocb;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed)) {
			list_move_tail(&req->list, &done);
			continue;
		}
		if (!list_empty(&done))
			break;

		if (req->result == -EAGAIN) {
			list_move_tail(&req->list, &again);
			continue;
		}
		if (!list_empty(&again))
			break;

		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
		if (ret < 0)
			break;

		if (ret && spin)
			spin = false;
		ret = 0;
	}

	if (!list_empty(&done))
		io_iopoll_complete(ctx, nr_events, &done);

	if (!list_empty(&again))
		io_iopoll_queue(&again);

	return ret;
}
1848/*
d195a66e 1849 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
def596e9
JA
1850 * non-spinning poll check - we'll still enter the driver poll loop, but only
1851 * as a non-spinning completion check.
1852 */
1853static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
1854 long min)
1855{
08f5439f 1856 while (!list_empty(&ctx->poll_list) && !need_resched()) {
def596e9
JA
1857 int ret;
1858
1859 ret = io_do_iopoll(ctx, nr_events, min);
1860 if (ret < 0)
1861 return ret;
1862 if (!min || *nr_events >= min)
1863 return 0;
1864 }
1865
1866 return 1;
1867}
1868
1869/*
1870 * We can't just wait for polled events to come to us, we have to actively
1871 * find and complete them.
1872 */
1873static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
1874{
1875 if (!(ctx->flags & IORING_SETUP_IOPOLL))
1876 return;
1877
1878 mutex_lock(&ctx->uring_lock);
1879 while (!list_empty(&ctx->poll_list)) {
1880 unsigned int nr_events = 0;
1881
1882 io_iopoll_getevents(ctx, &nr_events, 1);
08f5439f
JA
1883
1884 /*
1885 * Ensure we allow local-to-the-cpu processing to take place;
1886 * in this case we need to ensure that we reap all events.
1887 */
1888 cond_resched();
def596e9
JA
1889 }
1890 mutex_unlock(&ctx->uring_lock);
1891}
1892
c7849be9
XW
1893static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
1894 long min)
def596e9 1895{
2b2ed975 1896 int iters = 0, ret = 0;
500f9fba 1897
c7849be9
XW
1898 /*
1899 * We disallow the app entering submit/complete with polling, but we
1900 * still need to lock the ring to prevent racing with polled issue
1901 * that got punted to a workqueue.
1902 */
1903 mutex_lock(&ctx->uring_lock);
def596e9
JA
1904 do {
1905 int tmin = 0;
1906
a3a0e43f
JA
1907 /*
1908 * Don't enter poll loop if we already have events pending.
1909 * If we do, we can potentially be spinning for commands that
1910 * already triggered a CQE (eg in error).
1911 */
1d7bb1d5 1912 if (io_cqring_events(ctx, false))
a3a0e43f
JA
1913 break;
1914
500f9fba
JA
1915 /*
1916 * If a submit got punted to a workqueue, we can have the
1917 * application entering polling for a command before it gets
1918 * issued. That app will hold the uring_lock for the duration
1919 * of the poll right here, so we need to take a breather every
1920 * now and then to ensure that the issue has a chance to add
1921 * the poll to the issued list. Otherwise we can spin here
1922 * forever, while the workqueue is stuck trying to acquire the
1923 * very same mutex.
1924 */
1925 if (!(++iters & 7)) {
1926 mutex_unlock(&ctx->uring_lock);
1927 mutex_lock(&ctx->uring_lock);
1928 }
1929
def596e9
JA
1930 if (*nr_events < min)
1931 tmin = min - *nr_events;
1932
1933 ret = io_iopoll_getevents(ctx, nr_events, tmin);
1934 if (ret <= 0)
1935 break;
1936 ret = 0;
1937 } while (min && !*nr_events && !need_resched());
1938
500f9fba 1939 mutex_unlock(&ctx->uring_lock);
def596e9
JA
1940 return ret;
1941}
1942
491381ce 1943static void kiocb_end_write(struct io_kiocb *req)
2b188cc1 1944{
491381ce
JA
1945 /*
1946 * Tell lockdep we inherited freeze protection from submission
1947 * thread.
1948 */
1949 if (req->flags & REQ_F_ISREG) {
1950 struct inode *inode = file_inode(req->file);
2b188cc1 1951
491381ce 1952 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
2b188cc1 1953 }
491381ce 1954 file_end_write(req->file);
2b188cc1
JA
1955}
1956
4e88d6e7
JA
1957static inline void req_set_fail_links(struct io_kiocb *req)
1958{
1959 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
1960 req->flags |= REQ_F_FAIL_LINK;
1961}
1962
ba816ad6 1963static void io_complete_rw_common(struct kiocb *kiocb, long res)
2b188cc1 1964{
9adbd45d 1965 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
bcda7baa 1966 int cflags = 0;
2b188cc1 1967
491381ce
JA
1968 if (kiocb->ki_flags & IOCB_WRITE)
1969 kiocb_end_write(req);
2b188cc1 1970
4e88d6e7
JA
1971 if (res != req->result)
1972 req_set_fail_links(req);
bcda7baa
JA
1973 if (req->flags & REQ_F_BUFFER_SELECTED)
1974 cflags = io_put_kbuf(req);
1975 __io_cqring_add_event(req, res, cflags);
ba816ad6
JA
1976}
1977
1978static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
1979{
9adbd45d 1980 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
ba816ad6
JA
1981
1982 io_complete_rw_common(kiocb, res);
e65ef56d 1983 io_put_req(req);
2b188cc1
JA
1984}
1985
def596e9
JA
1986static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
1987{
9adbd45d 1988 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
def596e9 1989
491381ce
JA
1990 if (kiocb->ki_flags & IOCB_WRITE)
1991 kiocb_end_write(req);
def596e9 1992
4e88d6e7
JA
1993 if (res != req->result)
1994 req_set_fail_links(req);
9e645e11 1995 req->result = res;
def596e9 1996 if (res != -EAGAIN)
65a6543d 1997 WRITE_ONCE(req->iopoll_completed, 1);
def596e9
JA
1998}
1999
2000/*
2001 * After the iocb has been issued, it's safe to be found on the poll list.
2002 * Adding the kiocb to the list AFTER submission ensures that we don't
2003 * find it from an io_iopoll_getevents() thread before the issuer is done
2004 * accessing the kiocb cookie.
2005 */
2006static void io_iopoll_req_issued(struct io_kiocb *req)
2007{
2008 struct io_ring_ctx *ctx = req->ctx;
2009
2010 /*
2011 * Track whether we have multiple files in our lists. This will impact
2012 * how we do polling eventually, not spinning if we're on potentially
2013 * different devices.
2014 */
2015 if (list_empty(&ctx->poll_list)) {
2016 ctx->poll_multi_file = false;
2017 } else if (!ctx->poll_multi_file) {
2018 struct io_kiocb *list_req;
2019
2020 list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
2021 list);
9adbd45d 2022 if (list_req->file != req->file)
def596e9
JA
2023 ctx->poll_multi_file = true;
2024 }
2025
2026 /*
2027 * For fast devices, IO may have already completed. If it has, add
2028 * it to the front so we find it first.
2029 */
65a6543d 2030 if (READ_ONCE(req->iopoll_completed))
def596e9
JA
2031 list_add(&req->list, &ctx->poll_list);
2032 else
2033 list_add_tail(&req->list, &ctx->poll_list);
bdcd3eab
XW
2034
2035 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2036 wq_has_sleeper(&ctx->sqo_wait))
2037 wake_up(&ctx->sqo_wait);
def596e9
JA
2038}
2039
9f13c35b 2040static void __io_state_file_put(struct io_submit_state *state)
9a56a232 2041{
9f13c35b 2042 int diff = state->has_refs - state->used_refs;
9a56a232 2043
9f13c35b
PB
2044 if (diff)
2045 fput_many(state->file, diff);
2046 state->file = NULL;
2047}
2048
2049static inline void io_state_file_put(struct io_submit_state *state)
2050{
2051 if (state->file)
2052 __io_state_file_put(state);
9a56a232
JA
2053}
2054
2055/*
2056 * Get as many references to a file as we have IOs left in this submission,
2057 * assuming most submissions are for one file, or at least that each file
2058 * has more than one submission.
2059 */
8da11c19 2060static struct file *__io_file_get(struct io_submit_state *state, int fd)
9a56a232
JA
2061{
2062 if (!state)
2063 return fget(fd);
2064
2065 if (state->file) {
2066 if (state->fd == fd) {
2067 state->used_refs++;
2068 state->ios_left--;
2069 return state->file;
2070 }
9f13c35b 2071 __io_state_file_put(state);
9a56a232
JA
2072 }
2073 state->file = fget_many(fd, state->ios_left);
2074 if (!state->file)
2075 return NULL;
2076
2077 state->fd = fd;
2078 state->has_refs = state->ios_left;
2079 state->used_refs = 1;
2080 state->ios_left--;
2081 return state->file;
2082}
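
/*
 * For illustration: with ios_left == 8 and every SQE in the batch
 * targeting the same fd, the first lookup does one fget_many(fd, 8) and
 * the next seven calls are satisfied from state->file without touching
 * the file table. Any surplus references are dropped in
 * __io_state_file_put() when the batch ends or the fd changes.
 */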
2083
2b188cc1
JA
2084/*
2085 * If we tracked the file through the SCM inflight mechanism, we could support
2086 * any file. For now, just ensure that anything potentially problematic is done
2087 * inline.
2088 */
af197f50 2089static bool io_file_supports_async(struct file *file, int rw)
2b188cc1
JA
2090{
2091 umode_t mode = file_inode(file)->i_mode;
2092
10d59345 2093 if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode))
2b188cc1
JA
2094 return true;
2095 if (S_ISREG(mode) && file->f_op != &io_uring_fops)
2096 return true;
2097
c5b85625
JA
2098 /* any ->read/write should understand O_NONBLOCK */
2099 if (file->f_flags & O_NONBLOCK)
2100 return true;
2101
af197f50
JA
2102 if (!(file->f_mode & FMODE_NOWAIT))
2103 return false;
2104
2105 if (rw == READ)
2106 return file->f_op->read_iter != NULL;
2107
2108 return file->f_op->write_iter != NULL;
2b188cc1
JA
2109}
2110
3529d8c2
JA
2111static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2112 bool force_nonblock)
2b188cc1 2113{
def596e9 2114 struct io_ring_ctx *ctx = req->ctx;
9adbd45d 2115 struct kiocb *kiocb = &req->rw.kiocb;
09bb8394
JA
2116 unsigned ioprio;
2117 int ret;
2b188cc1 2118
491381ce
JA
2119 if (S_ISREG(file_inode(req->file)->i_mode))
2120 req->flags |= REQ_F_ISREG;
2121
2b188cc1 2122 kiocb->ki_pos = READ_ONCE(sqe->off);
ba04291e
JA
2123 if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
2124 req->flags |= REQ_F_CUR_POS;
2125 kiocb->ki_pos = req->file->f_pos;
2126 }
2b188cc1 2127 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
3e577dcd
PB
2128 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2129 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2130 if (unlikely(ret))
2131 return ret;
2b188cc1
JA
2132
2133 ioprio = READ_ONCE(sqe->ioprio);
2134 if (ioprio) {
2135 ret = ioprio_check_cap(ioprio);
2136 if (ret)
09bb8394 2137 return ret;
2b188cc1
JA
2138
2139 kiocb->ki_ioprio = ioprio;
2140 } else
2141 kiocb->ki_ioprio = get_current_ioprio();
2142
8449eeda 2143 /* don't allow async punt if RWF_NOWAIT was requested */
c5b85625 2144 if (kiocb->ki_flags & IOCB_NOWAIT)
8449eeda
SB
2145 req->flags |= REQ_F_NOWAIT;
2146
2147 if (force_nonblock)
2b188cc1 2148 kiocb->ki_flags |= IOCB_NOWAIT;
8449eeda 2149
def596e9 2150 if (ctx->flags & IORING_SETUP_IOPOLL) {
def596e9
JA
2151 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2152 !kiocb->ki_filp->f_op->iopoll)
09bb8394 2153 return -EOPNOTSUPP;
2b188cc1 2154
def596e9
JA
2155 kiocb->ki_flags |= IOCB_HIPRI;
2156 kiocb->ki_complete = io_complete_rw_iopoll;
6873e0bd 2157 req->result = 0;
65a6543d 2158 req->iopoll_completed = 0;
def596e9 2159 } else {
09bb8394
JA
2160 if (kiocb->ki_flags & IOCB_HIPRI)
2161 return -EINVAL;
def596e9
JA
2162 kiocb->ki_complete = io_complete_rw;
2163 }
9adbd45d 2164
3529d8c2
JA
2165 req->rw.addr = READ_ONCE(sqe->addr);
2166 req->rw.len = READ_ONCE(sqe->len);
4f4eeba8 2167 req->buf_index = READ_ONCE(sqe->buf_index);
2b188cc1 2168 return 0;
2b188cc1
JA
2169}
2170
2171static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2172{
2173 switch (ret) {
2174 case -EIOCBQUEUED:
2175 break;
2176 case -ERESTARTSYS:
2177 case -ERESTARTNOINTR:
2178 case -ERESTARTNOHAND:
2179 case -ERESTART_RESTARTBLOCK:
2180 /*
2181 * We can't just restart the syscall, since previously
2182 * submitted sqes may already be in progress. Just fail this
2183 * IO with EINTR.
2184 */
2185 ret = -EINTR;
2186 /* fall through */
2187 default:
2188 kiocb->ki_complete(kiocb, ret, 0);
2189 }
2190}
2191
014db007 2192static void kiocb_done(struct kiocb *kiocb, ssize_t ret)
ba816ad6 2193{
ba04291e
JA
2194 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2195
2196 if (req->flags & REQ_F_CUR_POS)
2197 req->file->f_pos = kiocb->ki_pos;
bcaec089 2198 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
014db007 2199 io_complete_rw(kiocb, ret, 0);
ba816ad6
JA
2200 else
2201 io_rw_done(kiocb, ret);
2202}
2203
9adbd45d 2204static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
7d009165 2205 struct iov_iter *iter)
edafccee 2206{
9adbd45d
JA
2207 struct io_ring_ctx *ctx = req->ctx;
2208 size_t len = req->rw.len;
edafccee 2209 struct io_mapped_ubuf *imu;
4f4eeba8 2210 u16 index, buf_index;
edafccee
JA
2211 size_t offset;
2212 u64 buf_addr;
2213
2214 /* attempt to use fixed buffers without having provided iovecs */
2215 if (unlikely(!ctx->user_bufs))
2216 return -EFAULT;
2217
4f4eeba8 2218 buf_index = req->buf_index;
edafccee
JA
2219 if (unlikely(buf_index >= ctx->nr_user_bufs))
2220 return -EFAULT;
2221
2222 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2223 imu = &ctx->user_bufs[index];
9adbd45d 2224 buf_addr = req->rw.addr;
edafccee
JA
2225
2226 /* overflow */
2227 if (buf_addr + len < buf_addr)
2228 return -EFAULT;
2229 /* not inside the mapped region */
2230 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
2231 return -EFAULT;
2232
2233 /*
2234 * May not be the start of the buffer; set the size appropriately
2235 * and advance to the beginning.
2236 */
2237 offset = buf_addr - imu->ubuf;
2238 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bd11b3a3
JA
2239
2240 if (offset) {
2241 /*
2242 * Don't use iov_iter_advance() here, as it's really slow for
2243 * using the latter parts of a big fixed buffer - it iterates
2244 * over each segment manually. We can cheat a bit here, because
2245 * we know that:
2246 *
2247 * 1) it's a BVEC iter, we set it up
2248 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2249 * first and last bvec
2250 *
2251 * So just find our index, and adjust the iterator afterwards.
2252 * If the offset is within the first bvec (or the whole first
2253 * bvec), just use iov_iter_advance(). This makes it easier
2254 * since we can just skip the first segment, which may not
2255 * be PAGE_SIZE aligned.
2256 */
2257 const struct bio_vec *bvec = imu->bvec;
2258
2259 if (offset <= bvec->bv_len) {
2260 iov_iter_advance(iter, offset);
2261 } else {
2262 unsigned long seg_skip;
2263
2264 /* skip first vec */
2265 offset -= bvec->bv_len;
2266 seg_skip = 1 + (offset >> PAGE_SHIFT);
2267
2268 iter->bvec = bvec + seg_skip;
2269 iter->nr_segs -= seg_skip;
99c79f66 2270 iter->count -= bvec->bv_len + offset;
bd11b3a3 2271 iter->iov_offset = offset & ~PAGE_MASK;
bd11b3a3
JA
2272 }
2273 }
2274
5e559561 2275 return len;
edafccee
JA
2276}
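
/*
 * A worked example of the seg_skip math above, assuming PAGE_SIZE == 4096
 * and a registered buffer whose first bvec holds the unaligned head:
 *
 *	bvec[0].bv_len = 1024	covers buffer bytes [0, 1024)
 *	bvec[1].bv_len = 4096	covers [1024, 5120)
 *	bvec[2].bv_len = 4096	covers [5120, 9216)
 *
 * For offset == 6144: offset > bvec[0].bv_len, so offset -= 1024 -> 5120,
 * seg_skip = 1 + (5120 >> 12) = 2, iov_offset = 5120 & 4095 = 1024.
 * The iterator thus starts 1024 bytes into bvec[2], i.e. at buffer
 * byte 6144, without walking the intermediate segments.
 */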
2277
bcda7baa
JA
2278static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2279{
2280 if (needs_lock)
2281 mutex_unlock(&ctx->uring_lock);
2282}
2283
2284static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2285{
2286 /*
2287 * "Normal" inline submissions always hold the uring_lock, since we
2288 * grab it from the system call. Same is true for the SQPOLL offload.
2289 * The only exception is when we've detached the request and issue it
2290 * from an async worker thread; grab the lock for that case.
2291 */
2292 if (needs_lock)
2293 mutex_lock(&ctx->uring_lock);
2294}
2295
2296static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2297 int bgid, struct io_buffer *kbuf,
2298 bool needs_lock)
2299{
2300 struct io_buffer *head;
2301
2302 if (req->flags & REQ_F_BUFFER_SELECTED)
2303 return kbuf;
2304
2305 io_ring_submit_lock(req->ctx, needs_lock);
2306
2307 lockdep_assert_held(&req->ctx->uring_lock);
2308
2309 head = idr_find(&req->ctx->io_buffer_idr, bgid);
2310 if (head) {
2311 if (!list_empty(&head->list)) {
2312 kbuf = list_last_entry(&head->list, struct io_buffer,
2313 list);
2314 list_del(&kbuf->list);
2315 } else {
2316 kbuf = head;
2317 idr_remove(&req->ctx->io_buffer_idr, bgid);
2318 }
2319 if (*len > kbuf->len)
2320 *len = kbuf->len;
2321 } else {
2322 kbuf = ERR_PTR(-ENOBUFS);
2323 }
2324
2325 io_ring_submit_unlock(req->ctx, needs_lock);
2326
2327 return kbuf;
2328}
2329
4d954c25
JA
2330static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2331 bool needs_lock)
2332{
2333 struct io_buffer *kbuf;
4f4eeba8 2334 u16 bgid;
4d954c25
JA
2335
2336 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
4f4eeba8 2337 bgid = req->buf_index;
4d954c25
JA
2338 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2339 if (IS_ERR(kbuf))
2340 return kbuf;
2341 req->rw.addr = (u64) (unsigned long) kbuf;
2342 req->flags |= REQ_F_BUFFER_SELECTED;
2343 return u64_to_user_ptr(kbuf->addr);
2344}
2345
2346#ifdef CONFIG_COMPAT
2347static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2348 bool needs_lock)
2349{
2350 struct compat_iovec __user *uiov;
2351 compat_ssize_t clen;
2352 void __user *buf;
2353 ssize_t len;
2354
2355 uiov = u64_to_user_ptr(req->rw.addr);
2356 if (!access_ok(uiov, sizeof(*uiov)))
2357 return -EFAULT;
2358 if (__get_user(clen, &uiov->iov_len))
2359 return -EFAULT;
2360 if (clen < 0)
2361 return -EINVAL;
2362
2363 len = clen;
2364 buf = io_rw_buffer_select(req, &len, needs_lock);
2365 if (IS_ERR(buf))
2366 return PTR_ERR(buf);
2367 iov[0].iov_base = buf;
2368 iov[0].iov_len = (compat_size_t) len;
2369 return 0;
2370}
2371#endif
2372
2373static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2374 bool needs_lock)
2375{
2376 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2377 void __user *buf;
2378 ssize_t len;
2379
2380 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2381 return -EFAULT;
2382
2383 len = iov[0].iov_len;
2384 if (len < 0)
2385 return -EINVAL;
2386 buf = io_rw_buffer_select(req, &len, needs_lock);
2387 if (IS_ERR(buf))
2388 return PTR_ERR(buf);
2389 iov[0].iov_base = buf;
2390 iov[0].iov_len = len;
2391 return 0;
2392}
2393
2394static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2395 bool needs_lock)
2396{
dddb3e26
JA
2397 if (req->flags & REQ_F_BUFFER_SELECTED) {
2398 struct io_buffer *kbuf;
2399
2400 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2401 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
2402 iov[0].iov_len = kbuf->len;
4d954c25 2403 return 0;
dddb3e26 2404 }
4d954c25
JA
2405 if (!req->rw.len)
2406 return 0;
2407 else if (req->rw.len > 1)
2408 return -EINVAL;
2409
2410#ifdef CONFIG_COMPAT
2411 if (req->ctx->compat)
2412 return io_compat_import(req, iov, needs_lock);
2413#endif
2414
2415 return __io_iov_buffer_select(req, iov, needs_lock);
2416}
2417
cf6fd4bd 2418static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
bcda7baa
JA
2419 struct iovec **iovec, struct iov_iter *iter,
2420 bool needs_lock)
2b188cc1 2421{
9adbd45d
JA
2422 void __user *buf = u64_to_user_ptr(req->rw.addr);
2423 size_t sqe_len = req->rw.len;
4d954c25 2424 ssize_t ret;
edafccee
JA
2425 u8 opcode;
2426
d625c6ee 2427 opcode = req->opcode;
7d009165 2428 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
edafccee 2429 *iovec = NULL;
9adbd45d 2430 return io_import_fixed(req, rw, iter);
edafccee 2431 }
2b188cc1 2432
bcda7baa 2433 /* buffer index only valid with fixed read/write, or buffer select */
4f4eeba8 2434 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
9adbd45d
JA
2435 return -EINVAL;
2436
3a6820f2 2437 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
bcda7baa 2438 if (req->flags & REQ_F_BUFFER_SELECT) {
4d954c25
JA
2439 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
2440 if (IS_ERR(buf)) {
bcda7baa 2441 *iovec = NULL;
4d954c25 2442 return PTR_ERR(buf);
bcda7baa 2443 }
3f9d6441 2444 req->rw.len = sqe_len;
bcda7baa
JA
2445 }
2446
3a6820f2
JA
2447 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
2448 *iovec = NULL;
3a901598 2449 return ret < 0 ? ret : sqe_len;
3a6820f2
JA
2450 }
2451
f67676d1
JA
2452 if (req->io) {
2453 struct io_async_rw *iorw = &req->io->rw;
2454
2455 *iovec = iorw->iov;
2456 iov_iter_init(iter, rw, *iovec, iorw->nr_segs, iorw->size);
2457 if (iorw->iov == iorw->fast_iov)
2458 *iovec = NULL;
2459 return iorw->size;
2460 }
2461
4d954c25
JA
2462 if (req->flags & REQ_F_BUFFER_SELECT) {
2463 ret = io_iov_buffer_select(req, *iovec, needs_lock);
3f9d6441
JA
2464 if (!ret) {
2465 ret = (*iovec)->iov_len;
2466 iov_iter_init(iter, rw, *iovec, 1, ret);
2467 }
4d954c25
JA
2468 *iovec = NULL;
2469 return ret;
2470 }
2471
2b188cc1 2472#ifdef CONFIG_COMPAT
cf6fd4bd 2473 if (req->ctx->compat)
2b188cc1
JA
2474 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
2475 iovec, iter);
2476#endif
2477
2478 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
2479}
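
/*
 * To summarise the paths above, the iterator is sourced from one of
 * five places, in this order of precedence:
 *
 *	1) READ_FIXED/WRITE_FIXED - registered buffer, bvec iter
 *	2) READ/WRITE - single range at sqe->addr, or a selected
 *	   buffer when BUFFER_SELECT is set
 *	3) req->io - iovec already imported by an earlier prep of
 *	   this request
 *	4) BUFFER_SELECT readv - single iovec from a buffer group
 *	5) plain (or compat) iovec import on sqe->addr
 */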
2480
31b51510 2481/*
32960613
JA
2482 * For files that don't have ->read_iter() and ->write_iter(), handle them
2483 * by looping over ->read() or ->write() manually.
31b51510 2484 */
32960613
JA
2485static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
2486 struct iov_iter *iter)
2487{
2488 ssize_t ret = 0;
2489
2490 /*
2491 * Don't support polled IO through this interface, and we can't
2492 * support non-blocking either. For the latter, this just causes
2493 * the kiocb to be handled from an async context.
2494 */
2495 if (kiocb->ki_flags & IOCB_HIPRI)
2496 return -EOPNOTSUPP;
2497 if (kiocb->ki_flags & IOCB_NOWAIT)
2498 return -EAGAIN;
2499
2500 while (iov_iter_count(iter)) {
311ae9e1 2501 struct iovec iovec;
32960613
JA
2502 ssize_t nr;
2503
311ae9e1
PB
2504 if (!iov_iter_is_bvec(iter)) {
2505 iovec = iov_iter_iovec(iter);
2506 } else {
2507 /* fixed buffers import bvec */
2508 iovec.iov_base = kmap(iter->bvec->bv_page)
2509 + iter->iov_offset;
2510 iovec.iov_len = min(iter->count,
2511 iter->bvec->bv_len - iter->iov_offset);
2512 }
2513
32960613
JA
2514 if (rw == READ) {
2515 nr = file->f_op->read(file, iovec.iov_base,
2516 iovec.iov_len, &kiocb->ki_pos);
2517 } else {
2518 nr = file->f_op->write(file, iovec.iov_base,
2519 iovec.iov_len, &kiocb->ki_pos);
2520 }
2521
311ae9e1
PB
2522 if (iov_iter_is_bvec(iter))
2523 kunmap(iter->bvec->bv_page);
2524
32960613
JA
2525 if (nr < 0) {
2526 if (!ret)
2527 ret = nr;
2528 break;
2529 }
2530 ret += nr;
2531 if (nr != iovec.iov_len)
2532 break;
2533 iov_iter_advance(iter, nr);
2534 }
2535
2536 return ret;
2537}
2538
b7bb4f7d 2539static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
f67676d1
JA
2540 struct iovec *iovec, struct iovec *fast_iov,
2541 struct iov_iter *iter)
2542{
2543 req->io->rw.nr_segs = iter->nr_segs;
2544 req->io->rw.size = io_size;
2545 req->io->rw.iov = iovec;
2546 if (!req->io->rw.iov) {
2547 req->io->rw.iov = req->io->rw.fast_iov;
45097dae
XW
2548 if (req->io->rw.iov != fast_iov)
2549 memcpy(req->io->rw.iov, fast_iov,
2550 sizeof(struct iovec) * iter->nr_segs);
99bc4c38
PB
2551 } else {
2552 req->flags |= REQ_F_NEED_CLEANUP;
f67676d1
JA
2553 }
2554}
2555
3d9932a8
XW
2556static inline int __io_alloc_async_ctx(struct io_kiocb *req)
2557{
2558 req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
2559 return req->io == NULL;
2560}
2561
b7bb4f7d 2562static int io_alloc_async_ctx(struct io_kiocb *req)
f67676d1 2563{
d3656344
JA
2564 if (!io_op_defs[req->opcode].async_ctx)
2565 return 0;
3d9932a8
XW
2566
2567 return __io_alloc_async_ctx(req);
b7bb4f7d
JA
2568}
2569
b7bb4f7d
JA
2570static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
2571 struct iovec *iovec, struct iovec *fast_iov,
2572 struct iov_iter *iter)
2573{
980ad263 2574 if (!io_op_defs[req->opcode].async_ctx)
74566df3 2575 return 0;
5d204bcf 2576 if (!req->io) {
3d9932a8 2577 if (__io_alloc_async_ctx(req))
5d204bcf 2578 return -ENOMEM;
b7bb4f7d 2579
5d204bcf
JA
2580 io_req_map_rw(req, io_size, iovec, fast_iov, iter);
2581 }
b7bb4f7d 2582 return 0;
f67676d1
JA
2583}
2584
3529d8c2
JA
2585static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2586 bool force_nonblock)
f67676d1 2587{
3529d8c2
JA
2588 struct io_async_ctx *io;
2589 struct iov_iter iter;
f67676d1
JA
2590 ssize_t ret;
2591
3529d8c2
JA
2592 ret = io_prep_rw(req, sqe, force_nonblock);
2593 if (ret)
2594 return ret;
f67676d1 2595
3529d8c2
JA
2596 if (unlikely(!(req->file->f_mode & FMODE_READ)))
2597 return -EBADF;
f67676d1 2598
5f798bea
PB
2599 /* either don't need iovec imported or already have it */
2600 if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
3529d8c2
JA
2601 return 0;
2602
2603 io = req->io;
2604 io->rw.iov = io->rw.fast_iov;
2605 req->io = NULL;
bcda7baa 2606 ret = io_import_iovec(READ, req, &io->rw.iov, &iter, !force_nonblock);
3529d8c2
JA
2607 req->io = io;
2608 if (ret < 0)
2609 return ret;
2610
2611 io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
2612 return 0;
f67676d1
JA
2613}
2614
014db007 2615static int io_read(struct io_kiocb *req, bool force_nonblock)
2b188cc1
JA
2616{
2617 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 2618 struct kiocb *kiocb = &req->rw.kiocb;
2b188cc1 2619 struct iov_iter iter;
31b51510 2620 size_t iov_count;
f67676d1 2621 ssize_t io_size, ret;
2b188cc1 2622
bcda7baa 2623 ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
06b76d44
JA
2624 if (ret < 0)
2625 return ret;
2b188cc1 2626
fd6c2e4c
JA
2627 /* Ensure we clear previously set non-block flag */
2628 if (!force_nonblock)
29de5f6a 2629 kiocb->ki_flags &= ~IOCB_NOWAIT;
fd6c2e4c 2630
797f3f53 2631 req->result = 0;
f67676d1 2632 io_size = ret;
dea3b49c 2633 if (req->flags & REQ_F_LINK_HEAD)
f67676d1
JA
2634 req->result = io_size;
2635
2636 /*
2637 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2638 * we know to async punt it even if it was opened O_NONBLOCK
2639 */
af197f50 2640 if (force_nonblock && !io_file_supports_async(req->file, READ))
f67676d1 2641 goto copy_iov;
9e645e11 2642
31b51510 2643 iov_count = iov_iter_count(&iter);
9adbd45d 2644 ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
2b188cc1
JA
2645 if (!ret) {
2646 ssize_t ret2;
2647
9adbd45d
JA
2648 if (req->file->f_op->read_iter)
2649 ret2 = call_read_iter(req->file, kiocb, &iter);
32960613 2650 else
9adbd45d 2651 ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
32960613 2652
9d93a3f5 2653 /* Catch -EAGAIN return for forced non-blocking submission */
f67676d1 2654 if (!force_nonblock || ret2 != -EAGAIN) {
014db007 2655 kiocb_done(kiocb, ret2);
f67676d1
JA
2656 } else {
2657copy_iov:
b7bb4f7d 2658 ret = io_setup_async_rw(req, io_size, iovec,
f67676d1
JA
2659 inline_vecs, &iter);
2660 if (ret)
2661 goto out_free;
29de5f6a 2662 /* any defer here is final, must retry from blocking context */
490e8967
JA
2663 if (!(req->flags & REQ_F_NOWAIT) &&
2664 !file_can_poll(req->file))
29de5f6a 2665 req->flags |= REQ_F_MUST_PUNT;
f67676d1
JA
2666 return -EAGAIN;
2667 }
2b188cc1 2668 }
f67676d1 2669out_free:
1e95081c 2670 kfree(iovec);
99bc4c38 2671 req->flags &= ~REQ_F_NEED_CLEANUP;
2b188cc1
JA
2672 return ret;
2673}
2674
3529d8c2
JA
2675static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2676 bool force_nonblock)
f67676d1 2677{
3529d8c2
JA
2678 struct io_async_ctx *io;
2679 struct iov_iter iter;
f67676d1
JA
2680 ssize_t ret;
2681
3529d8c2
JA
2682 ret = io_prep_rw(req, sqe, force_nonblock);
2683 if (ret)
2684 return ret;
f67676d1 2685
3529d8c2
JA
2686 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
2687 return -EBADF;
f67676d1 2688
4ed734b0
JA
2689 req->fsize = rlimit(RLIMIT_FSIZE);
2690
5f798bea
PB
2691 /* either don't need iovec imported or already have it */
2692 if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
3529d8c2
JA
2693 return 0;
2694
2695 io = req->io;
2696 io->rw.iov = io->rw.fast_iov;
2697 req->io = NULL;
bcda7baa 2698 ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter, !force_nonblock);
3529d8c2
JA
2699 req->io = io;
2700 if (ret < 0)
2701 return ret;
2702
2703 io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
2704 return 0;
f67676d1
JA
2705}
2706
014db007 2707static int io_write(struct io_kiocb *req, bool force_nonblock)
2b188cc1
JA
2708{
2709 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 2710 struct kiocb *kiocb = &req->rw.kiocb;
2b188cc1 2711 struct iov_iter iter;
31b51510 2712 size_t iov_count;
f67676d1 2713 ssize_t ret, io_size;
2b188cc1 2714
bcda7baa 2715 ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
06b76d44
JA
2716 if (ret < 0)
2717 return ret;
2b188cc1 2718
fd6c2e4c
JA
2719 /* Ensure we clear previously set non-block flag */
2720 if (!force_nonblock)
9adbd45d 2721 req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
fd6c2e4c 2722
797f3f53 2723 req->result = 0;
f67676d1 2724 io_size = ret;
dea3b49c 2725 if (req->flags & REQ_F_LINK_HEAD)
f67676d1 2726 req->result = io_size;
9e645e11 2727
f67676d1
JA
2728 /*
2729 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2730 * we know to async punt it even if it was opened O_NONBLOCK
2731 */
af197f50 2732 if (force_nonblock && !io_file_supports_async(req->file, WRITE))
f67676d1 2733 goto copy_iov;
31b51510 2734
10d59345
JA
2735 /* file path doesn't support NOWAIT for non-direct_IO */
2736 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
2737 (req->flags & REQ_F_ISREG))
f67676d1 2738 goto copy_iov;
31b51510 2739
f67676d1 2740 iov_count = iov_iter_count(&iter);
9adbd45d 2741 ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
2b188cc1 2742 if (!ret) {
9bf7933f
RP
2743 ssize_t ret2;
2744
2b188cc1
JA
2745 /*
2746 * Open-code file_start_write here to grab freeze protection,
2747 * which will be released by another thread in
2748 * io_complete_rw(). Fool lockdep by telling it the lock got
2749 * released so that it doesn't complain about the held lock when
2750 * we return to userspace.
2751 */
491381ce 2752 if (req->flags & REQ_F_ISREG) {
9adbd45d 2753 __sb_start_write(file_inode(req->file)->i_sb,
2b188cc1 2754 SB_FREEZE_WRITE, true);
9adbd45d 2755 __sb_writers_release(file_inode(req->file)->i_sb,
2b188cc1
JA
2756 SB_FREEZE_WRITE);
2757 }
2758 kiocb->ki_flags |= IOCB_WRITE;
9bf7933f 2759
4ed734b0
JA
2760 if (!force_nonblock)
2761 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
2762
9adbd45d
JA
2763 if (req->file->f_op->write_iter)
2764 ret2 = call_write_iter(req->file, kiocb, &iter);
32960613 2765 else
9adbd45d 2766 ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
4ed734b0
JA
2767
2768 if (!force_nonblock)
2769 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
2770
faac996c 2771 /*
bff6035d 2772 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
faac996c
JA
2773 * retry them without IOCB_NOWAIT.
2774 */
2775 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
2776 ret2 = -EAGAIN;
f67676d1 2777 if (!force_nonblock || ret2 != -EAGAIN) {
014db007 2778 kiocb_done(kiocb, ret2);
f67676d1
JA
2779 } else {
2780copy_iov:
b7bb4f7d 2781 ret = io_setup_async_rw(req, io_size, iovec,
f67676d1
JA
2782 inline_vecs, &iter);
2783 if (ret)
2784 goto out_free;
29de5f6a 2785 /* any defer here is final, must retry from blocking context */
c5b85625
JA
2786 if (!(req->flags & REQ_F_NOWAIT) &&
2787 !file_can_poll(req->file))
490e8967 2788 req->flags |= REQ_F_MUST_PUNT;
f67676d1
JA
2789 return -EAGAIN;
2790 }
2b188cc1 2791 }
31b51510 2792out_free:
99bc4c38 2793 req->flags &= ~REQ_F_NEED_CLEANUP;
1e95081c 2794 kfree(iovec);
2b188cc1
JA
2795 return ret;
2796}
2797
f2a8d5c7
PB
2798static int __io_splice_prep(struct io_kiocb *req,
2799 const struct io_uring_sqe *sqe)
7d67af2c
PB
2800{
2801 struct io_splice *sp = &req->splice;
2802 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
2803 int ret;
2804
2805 if (req->flags & REQ_F_NEED_CLEANUP)
2806 return 0;
3232dd02
PB
2807 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
2808 return -EINVAL;
7d67af2c
PB
2809
2810 sp->file_in = NULL;
7d67af2c
PB
2811 sp->len = READ_ONCE(sqe->len);
2812 sp->flags = READ_ONCE(sqe->splice_flags);
2813
2814 if (unlikely(sp->flags & ~valid_flags))
2815 return -EINVAL;
2816
2817 ret = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), &sp->file_in,
2818 (sp->flags & SPLICE_F_FD_IN_FIXED));
2819 if (ret)
2820 return ret;
2821 req->flags |= REQ_F_NEED_CLEANUP;
2822
7cdaf587
XW
2823 if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
2824 /*
2825 * Splice operation will be punted aync, and here need to
2826 * modify io_wq_work.flags, so initialize io_wq_work firstly.
2827 */
2828 io_req_init_async(req);
7d67af2c 2829 req->work.flags |= IO_WQ_WORK_UNBOUND;
7cdaf587 2830 }
7d67af2c
PB
2831
2832 return 0;
2833}
2834
f2a8d5c7
PB
2835static int io_tee_prep(struct io_kiocb *req,
2836 const struct io_uring_sqe *sqe)
2837{
2838 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
2839 return -EINVAL;
2840 return __io_splice_prep(req, sqe);
2841}
2842
2843static int io_tee(struct io_kiocb *req, bool force_nonblock)
2844{
2845 struct io_splice *sp = &req->splice;
2846 struct file *in = sp->file_in;
2847 struct file *out = sp->file_out;
2848 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
2849 long ret = 0;
2850
2851 if (force_nonblock)
2852 return -EAGAIN;
2853 if (sp->len)
2854 ret = do_tee(in, out, sp->len, flags);
2855
2856 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
2857 req->flags &= ~REQ_F_NEED_CLEANUP;
2858
2859 io_cqring_add_event(req, ret);
2860 if (ret != sp->len)
2861 req_set_fail_links(req);
2862 io_put_req(req);
2863 return 0;
2864}
2865
2866static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2867{
2868 struct io_splice *sp = &req->splice;
2869
2870 sp->off_in = READ_ONCE(sqe->splice_off_in);
2871 sp->off_out = READ_ONCE(sqe->off);
2872 return __io_splice_prep(req, sqe);
2873}
2874
014db007 2875static int io_splice(struct io_kiocb *req, bool force_nonblock)
7d67af2c
PB
2876{
2877 struct io_splice *sp = &req->splice;
2878 struct file *in = sp->file_in;
2879 struct file *out = sp->file_out;
2880 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
2881 loff_t *poff_in, *poff_out;
c9687426 2882 long ret = 0;
7d67af2c 2883
2fb3e822
PB
2884 if (force_nonblock)
2885 return -EAGAIN;
7d67af2c
PB
2886
2887 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
2888 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
c9687426 2889
948a7749 2890 if (sp->len)
c9687426 2891 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
7d67af2c
PB
2892
2893 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
2894 req->flags &= ~REQ_F_NEED_CLEANUP;
2895
2896 io_cqring_add_event(req, ret);
2897 if (ret != sp->len)
2898 req_set_fail_links(req);
014db007 2899 io_put_req(req);
7d67af2c
PB
2900 return 0;
2901}
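
/*
 * A minimal userspace sketch of IORING_OP_SPLICE, assuming liburing and
 * placeholder descriptors pipefd[0] (pipe read end) and outfd; passing
 * -1 for an offset maps to the NULL loff_t pointers above:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_splice(sqe, pipefd[0], -1, outfd, -1, 4096, 0);
 *	io_uring_submit(&ring);
 */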
2902
2b188cc1
JA
2903/*
2904 * IORING_OP_NOP just posts a completion event, nothing else.
2905 */
78e19bbe 2906static int io_nop(struct io_kiocb *req)
2b188cc1
JA
2907{
2908 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 2909
def596e9
JA
2910 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2911 return -EINVAL;
2912
78e19bbe 2913 io_cqring_add_event(req, 0);
e65ef56d 2914 io_put_req(req);
2b188cc1
JA
2915 return 0;
2916}
2917
3529d8c2 2918static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
c992fe29 2919{
6b06314c 2920 struct io_ring_ctx *ctx = req->ctx;
c992fe29 2921
09bb8394
JA
2922 if (!req->file)
2923 return -EBADF;
c992fe29 2924
6b06314c 2925 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
def596e9 2926 return -EINVAL;
edafccee 2927 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
c992fe29
CH
2928 return -EINVAL;
2929
8ed8d3c3
JA
2930 req->sync.flags = READ_ONCE(sqe->fsync_flags);
2931 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
2932 return -EINVAL;
2933
2934 req->sync.off = READ_ONCE(sqe->off);
2935 req->sync.len = READ_ONCE(sqe->len);
c992fe29
CH
2936 return 0;
2937}
2938
ac45abc0 2939static int io_fsync(struct io_kiocb *req, bool force_nonblock)
8ed8d3c3 2940{
8ed8d3c3 2941 loff_t end = req->sync.off + req->sync.len;
8ed8d3c3
JA
2942 int ret;
2943
ac45abc0
PB
2944 /* fsync always requires a blocking context */
2945 if (force_nonblock)
2946 return -EAGAIN;
2947
9adbd45d 2948 ret = vfs_fsync_range(req->file, req->sync.off,
8ed8d3c3
JA
2949 end > 0 ? end : LLONG_MAX,
2950 req->sync.flags & IORING_FSYNC_DATASYNC);
2951 if (ret < 0)
2952 req_set_fail_links(req);
2953 io_cqring_add_event(req, ret);
014db007 2954 io_put_req(req);
c992fe29
CH
2955 return 0;
2956}
2957
d63d1b5e
JA
2958static int io_fallocate_prep(struct io_kiocb *req,
2959 const struct io_uring_sqe *sqe)
2960{
2961 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
2962 return -EINVAL;
3232dd02
PB
2963 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
2964 return -EINVAL;
d63d1b5e
JA
2965
2966 req->sync.off = READ_ONCE(sqe->off);
2967 req->sync.len = READ_ONCE(sqe->addr);
2968 req->sync.mode = READ_ONCE(sqe->len);
4ed734b0 2969 req->fsize = rlimit(RLIMIT_FSIZE);
d63d1b5e
JA
2970 return 0;
2971}
2972
014db007 2973static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
5d17b4a4 2974{
ac45abc0
PB
2975 int ret;
2976
d63d1b5e 2977 /* fallocate always requires a blocking context */
ac45abc0 2978 if (force_nonblock)
5d17b4a4
JA
2979 return -EAGAIN;
2980
ac45abc0
PB
2981 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
2982 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
2983 req->sync.len);
2984 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
2985 if (ret < 0)
2986 req_set_fail_links(req);
2987 io_cqring_add_event(req, ret);
2988 io_put_req(req);
5d17b4a4
JA
2989 return 0;
2990}
2991
ec65fea5 2992static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
b7bb4f7d 2993{
f8748881 2994 const char __user *fname;
15b71abe 2995 int ret;
b7bb4f7d 2996
3232dd02 2997 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
15b71abe 2998 return -EINVAL;
ec65fea5 2999 if (unlikely(sqe->ioprio || sqe->buf_index))
15b71abe 3000 return -EINVAL;
ec65fea5 3001 if (unlikely(req->flags & REQ_F_FIXED_FILE))
cf3040ca 3002 return -EBADF;
03b1230c 3003
ec65fea5
PB
3004 /* open.how should be already initialised */
3005 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
08a1d26e 3006 req->open.how.flags |= O_LARGEFILE;
3529d8c2 3007
25e72d10
PB
3008 req->open.dfd = READ_ONCE(sqe->fd);
3009 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
f8748881 3010 req->open.filename = getname(fname);
15b71abe
JA
3011 if (IS_ERR(req->open.filename)) {
3012 ret = PTR_ERR(req->open.filename);
3013 req->open.filename = NULL;
3014 return ret;
3015 }
4022e7af 3016 req->open.nofile = rlimit(RLIMIT_NOFILE);
8fef80bf 3017 req->flags |= REQ_F_NEED_CLEANUP;
15b71abe 3018 return 0;
03b1230c
JA
3019}
3020
ec65fea5
PB
3021static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3022{
3023 u64 flags, mode;
3024
3025 if (req->flags & REQ_F_NEED_CLEANUP)
3026 return 0;
3027 mode = READ_ONCE(sqe->len);
3028 flags = READ_ONCE(sqe->open_flags);
3029 req->open.how = build_open_how(flags, mode);
3030 return __io_openat_prep(req, sqe);
3031}
3032
cebdb986 3033static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
aa1fa28f 3034{
cebdb986 3035 struct open_how __user *how;
cebdb986 3036 size_t len;
0fa03c62
JA
3037 int ret;
3038
0bdbdd08
PB
3039 if (req->flags & REQ_F_NEED_CLEANUP)
3040 return 0;
cebdb986
JA
3041 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3042 len = READ_ONCE(sqe->len);
cebdb986
JA
3043 if (len < OPEN_HOW_SIZE_VER0)
3044 return -EINVAL;
3529d8c2 3045
cebdb986
JA
3046 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3047 len);
3048 if (ret)
3049 return ret;
3529d8c2 3050
ec65fea5 3051 return __io_openat_prep(req, sqe);
cebdb986
JA
3052}
3053
014db007 3054static int io_openat2(struct io_kiocb *req, bool force_nonblock)
15b71abe
JA
3055{
3056 struct open_flags op;
15b71abe
JA
3057 struct file *file;
3058 int ret;
3059
f86cd20c 3060 if (force_nonblock)
15b71abe 3061 return -EAGAIN;
15b71abe 3062
cebdb986 3063 ret = build_open_flags(&req->open.how, &op);
15b71abe
JA
3064 if (ret)
3065 goto err;
3066
4022e7af 3067 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
15b71abe
JA
3068 if (ret < 0)
3069 goto err;
3070
3071 file = do_filp_open(req->open.dfd, req->open.filename, &op);
3072 if (IS_ERR(file)) {
3073 put_unused_fd(ret);
3074 ret = PTR_ERR(file);
3075 } else {
3076 fsnotify_open(file);
3077 fd_install(ret, file);
3078 }
3079err:
3080 putname(req->open.filename);
8fef80bf 3081 req->flags &= ~REQ_F_NEED_CLEANUP;
15b71abe
JA
3082 if (ret < 0)
3083 req_set_fail_links(req);
3084 io_cqring_add_event(req, ret);
014db007 3085 io_put_req(req);
15b71abe
JA
3086 return 0;
3087}
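
/*
 * A minimal userspace sketch for IORING_OP_OPENAT2, assuming liburing;
 * the path is illustrative. liburing sets sqe->len to
 * sizeof(struct open_how), which is what the copy_struct_from_user()
 * in io_openat2_prep() expects:
 *
 *	struct open_how how = { .flags = O_RDONLY | O_CLOEXEC };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_openat2(sqe, AT_FDCWD, "data.txt", &how);
 *	io_uring_submit(&ring);
 *
 * On completion, cqe->res is the new fd, or -errno on failure.
 */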
3088
014db007 3089static int io_openat(struct io_kiocb *req, bool force_nonblock)
cebdb986 3090{
014db007 3091 return io_openat2(req, force_nonblock);
cebdb986
JA
3092}
3093
067524e9
JA
3094static int io_remove_buffers_prep(struct io_kiocb *req,
3095 const struct io_uring_sqe *sqe)
3096{
3097 struct io_provide_buf *p = &req->pbuf;
3098 u64 tmp;
3099
3100 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3101 return -EINVAL;
3102
3103 tmp = READ_ONCE(sqe->fd);
3104 if (!tmp || tmp > USHRT_MAX)
3105 return -EINVAL;
3106
3107 memset(p, 0, sizeof(*p));
3108 p->nbufs = tmp;
3109 p->bgid = READ_ONCE(sqe->buf_group);
3110 return 0;
3111}
3112
3113static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3114 int bgid, unsigned nbufs)
3115{
3116 unsigned i = 0;
3117
3118 /* shouldn't happen */
3119 if (!nbufs)
3120 return 0;
3121
3122 /* the head kbuf is the list itself */
3123 while (!list_empty(&buf->list)) {
3124 struct io_buffer *nxt;
3125
3126 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3127 list_del(&nxt->list);
3128 kfree(nxt);
3129 if (++i == nbufs)
3130 return i;
3131 }
3132 i++;
3133 kfree(buf);
3134 idr_remove(&ctx->io_buffer_idr, bgid);
3135
3136 return i;
3137}
3138
3139static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock)
3140{
3141 struct io_provide_buf *p = &req->pbuf;
3142 struct io_ring_ctx *ctx = req->ctx;
3143 struct io_buffer *head;
3144 int ret = 0;
3145
3146 io_ring_submit_lock(ctx, !force_nonblock);
3147
3148 lockdep_assert_held(&ctx->uring_lock);
3149
3150 ret = -ENOENT;
3151 head = idr_find(&ctx->io_buffer_idr, p->bgid);
3152 if (head)
3153 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
3154
3155 io_ring_submit_unlock(ctx, !force_nonblock);
3156 if (ret < 0)
3157 req_set_fail_links(req);
3158 io_cqring_add_event(req, ret);
3159 io_put_req(req);
3160 return 0;
3161}
3162
ddf0322d
JA
3163static int io_provide_buffers_prep(struct io_kiocb *req,
3164 const struct io_uring_sqe *sqe)
3165{
3166 struct io_provide_buf *p = &req->pbuf;
3167 u64 tmp;
3168
3169 if (sqe->ioprio || sqe->rw_flags)
3170 return -EINVAL;
3171
3172 tmp = READ_ONCE(sqe->fd);
3173 if (!tmp || tmp > USHRT_MAX)
3174 return -E2BIG;
3175 p->nbufs = tmp;
3176 p->addr = READ_ONCE(sqe->addr);
3177 p->len = READ_ONCE(sqe->len);
3178
efe68c1c 3179 if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
ddf0322d
JA
3180 return -EFAULT;
3181
3182 p->bgid = READ_ONCE(sqe->buf_group);
3183 tmp = READ_ONCE(sqe->off);
3184 if (tmp > USHRT_MAX)
3185 return -E2BIG;
3186 p->bid = tmp;
3187 return 0;
3188}
3189
3190static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3191{
3192 struct io_buffer *buf;
3193 u64 addr = pbuf->addr;
3194 int i, bid = pbuf->bid;
3195
3196 for (i = 0; i < pbuf->nbufs; i++) {
3197 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3198 if (!buf)
3199 break;
3200
3201 buf->addr = addr;
3202 buf->len = pbuf->len;
3203 buf->bid = bid;
3204 addr += pbuf->len;
3205 bid++;
3206 if (!*head) {
3207 INIT_LIST_HEAD(&buf->list);
3208 *head = buf;
3209 } else {
3210 list_add_tail(&buf->list, &(*head)->list);
3211 }
3212 }
3213
3214 return i ? i : -ENOMEM;
3215}
3216
ddf0322d
JA
3217static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock)
3218{
3219 struct io_provide_buf *p = &req->pbuf;
3220 struct io_ring_ctx *ctx = req->ctx;
3221 struct io_buffer *head, *list;
3222 int ret = 0;
3223
3224 io_ring_submit_lock(ctx, !force_nonblock);
3225
3226 lockdep_assert_held(&ctx->uring_lock);
3227
3228 list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
3229
3230 ret = io_add_buffers(p, &head);
3231 if (ret < 0)
3232 goto out;
3233
3234 if (!list) {
3235 ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
3236 GFP_KERNEL);
3237 if (ret < 0) {
067524e9 3238 __io_remove_buffers(ctx, head, p->bgid, -1U);
ddf0322d
JA
3239 goto out;
3240 }
3241 }
3242out:
3243 io_ring_submit_unlock(ctx, !force_nonblock);
3244 if (ret < 0)
3245 req_set_fail_links(req);
3246 io_cqring_add_event(req, ret);
3247 io_put_req(req);
3248 return 0;
cebdb986
JA
3249}
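
/*
 * A minimal userspace sketch tying both halves together, assuming
 * liburing; the group id (7), buffer count, and sizes are illustrative.
 * Publish a group of buffers, then let the kernel pick one for a read:
 *
 *	char bufs[8][4096];
 *	struct io_uring_sqe *sqe;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_provide_buffers(sqe, bufs, 4096, 8, 7, 0);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, NULL, 4096, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 7;
 *	io_uring_submit(&ring);
 *
 * (A careful program would wait for the PROVIDE_BUFFERS completion
 * before issuing the read.) The consumed buffer id comes back in
 * cqe->flags, as shown in io_put_kbuf().
 */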
3250
3e4827b0
JA
3251static int io_epoll_ctl_prep(struct io_kiocb *req,
3252 const struct io_uring_sqe *sqe)
3253{
3254#if defined(CONFIG_EPOLL)
3255 if (sqe->ioprio || sqe->buf_index)
3256 return -EINVAL;
3232dd02
PB
3257 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3258 return -EINVAL;
3e4827b0
JA
3259
3260 req->epoll.epfd = READ_ONCE(sqe->fd);
3261 req->epoll.op = READ_ONCE(sqe->len);
3262 req->epoll.fd = READ_ONCE(sqe->off);
3263
3264 if (ep_op_has_event(req->epoll.op)) {
3265 struct epoll_event __user *ev;
3266
3267 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
3268 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
3269 return -EFAULT;
3270 }
3271
3272 return 0;
3273#else
3274 return -EOPNOTSUPP;
3275#endif
3276}
3277
014db007 3278static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock)
3e4827b0
JA
3279{
3280#if defined(CONFIG_EPOLL)
3281 struct io_epoll *ie = &req->epoll;
3282 int ret;
3283
3284 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
3285 if (force_nonblock && ret == -EAGAIN)
3286 return -EAGAIN;
3287
3288 if (ret < 0)
3289 req_set_fail_links(req);
3290 io_cqring_add_event(req, ret);
014db007 3291 io_put_req(req);
3e4827b0
JA
3292 return 0;
3293#else
3294 return -EOPNOTSUPP;
3295#endif
3296}
3297
c1ca757b
JA
3298static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3299{
3300#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3301 if (sqe->ioprio || sqe->buf_index || sqe->off)
3302 return -EINVAL;
3232dd02
PB
3303 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3304 return -EINVAL;
c1ca757b
JA
3305
3306 req->madvise.addr = READ_ONCE(sqe->addr);
3307 req->madvise.len = READ_ONCE(sqe->len);
3308 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
3309 return 0;
3310#else
3311 return -EOPNOTSUPP;
3312#endif
3313}
3314
014db007 3315static int io_madvise(struct io_kiocb *req, bool force_nonblock)
c1ca757b
JA
3316{
3317#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3318 struct io_madvise *ma = &req->madvise;
3319 int ret;
3320
3321 if (force_nonblock)
3322 return -EAGAIN;
3323
3324 ret = do_madvise(ma->addr, ma->len, ma->advice);
3325 if (ret < 0)
3326 req_set_fail_links(req);
3327 io_cqring_add_event(req, ret);
014db007 3328 io_put_req(req);
c1ca757b
JA
3329 return 0;
3330#else
3331 return -EOPNOTSUPP;
3332#endif
3333}
3334
4840e418
JA
3335static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3336{
3337 if (sqe->ioprio || sqe->buf_index || sqe->addr)
3338 return -EINVAL;
3232dd02
PB
3339 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3340 return -EINVAL;
4840e418
JA
3341
3342 req->fadvise.offset = READ_ONCE(sqe->off);
3343 req->fadvise.len = READ_ONCE(sqe->len);
3344 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
3345 return 0;
3346}
3347
014db007 3348static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
4840e418
JA
3349{
3350 struct io_fadvise *fa = &req->fadvise;
3351 int ret;
3352
3e69426d
JA
3353 if (force_nonblock) {
3354 switch (fa->advice) {
3355 case POSIX_FADV_NORMAL:
3356 case POSIX_FADV_RANDOM:
3357 case POSIX_FADV_SEQUENTIAL:
3358 break;
3359 default:
3360 return -EAGAIN;
3361 }
3362 }
4840e418
JA
3363
3364 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
3365 if (ret < 0)
3366 req_set_fail_links(req);
3367 io_cqring_add_event(req, ret);
014db007 3368 io_put_req(req);
4840e418
JA
3369 return 0;
3370}
3371
eddc7ef5
JA
3372static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3373{
3232dd02
PB
3374 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3375 return -EINVAL;
eddc7ef5
JA
3376 if (sqe->ioprio || sqe->buf_index)
3377 return -EINVAL;
9c280f90 3378 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 3379 return -EBADF;
eddc7ef5 3380
1d9e1288
BM
3381 req->statx.dfd = READ_ONCE(sqe->fd);
3382 req->statx.mask = READ_ONCE(sqe->len);
e62753e4 3383 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
1d9e1288
BM
3384 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3385 req->statx.flags = READ_ONCE(sqe->statx_flags);
eddc7ef5
JA
3386
3387 return 0;
3388}
3389
014db007 3390static int io_statx(struct io_kiocb *req, bool force_nonblock)
eddc7ef5 3391{
1d9e1288 3392 struct io_statx *ctx = &req->statx;
eddc7ef5
JA
3393 int ret;
3394
5b0bbee4
JA
3395 if (force_nonblock) {
3396 /* only need file table for an actual valid fd */
3397 if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
3398 req->flags |= REQ_F_NO_FILE_TABLE;
eddc7ef5 3399 return -EAGAIN;
5b0bbee4 3400 }
eddc7ef5 3401
e62753e4
BM
3402 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
3403 ctx->buffer);
eddc7ef5 3404
eddc7ef5
JA
3405 if (ret < 0)
3406 req_set_fail_links(req);
3407 io_cqring_add_event(req, ret);
014db007 3408 io_put_req(req);
eddc7ef5
JA
3409 return 0;
3410}
3411
b5dba59e
JA
3412static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3413{
3414 /*
3415 * If we queue this for async, it must not be cancellable. That would
7cdaf587
XW
3416 * leave the 'file' in an indeterminate state, and we need to modify
3417 * io_wq_work.flags here, so initialize io_wq_work first.
b5dba59e 3418 */
7cdaf587 3419 io_req_init_async(req);
b5dba59e
JA
3420 req->work.flags |= IO_WQ_WORK_NO_CANCEL;
3421
3232dd02
PB
3422 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3423 return -EINVAL;
b5dba59e
JA
3424 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
3425 sqe->rw_flags || sqe->buf_index)
3426 return -EINVAL;
9c280f90 3427 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 3428 return -EBADF;
b5dba59e
JA
3429
3430 req->close.fd = READ_ONCE(sqe->fd);
fd2206e4
JA
3431 if ((req->file && req->file->f_op == &io_uring_fops) ||
3432 req->close.fd == req->ctx->ring_fd)
3433 return -EBADF;
b5dba59e 3434
3af73b28 3435 req->close.put_file = NULL;
b5dba59e 3436 return 0;
b5dba59e
JA
3437}
3438
014db007 3439static int io_close(struct io_kiocb *req, bool force_nonblock)
b5dba59e 3440{
3af73b28 3441 struct io_close *close = &req->close;
b5dba59e
JA
3442 int ret;
3443
3af73b28
PB
3444 /* might be already done during nonblock submission */
3445 if (!close->put_file) {
3446 ret = __close_fd_get_file(close->fd, &close->put_file);
3447 if (ret < 0)
3448 return (ret == -ENOENT) ? -EBADF : ret;
3449 }
b5dba59e
JA
3450
3451 /* if the file has a flush method, be safe and punt to async */
3af73b28 3452 if (close->put_file->f_op->flush && force_nonblock) {
0bf0eefd
PB
3453 /* avoid grabbing files - we don't need the files */
3454 req->flags |= REQ_F_NO_FILE_TABLE | REQ_F_MUST_PUNT;
0bf0eefd 3455 return -EAGAIN;
a2100672 3456 }
b5dba59e 3457
3af73b28
PB
3458 /* No ->flush() or already async, safely close from here */
3459 ret = filp_close(close->put_file, req->work.files);
3460 if (ret < 0)
3461 req_set_fail_links(req);
3462 io_cqring_add_event(req, ret);
3463 fput(close->put_file);
3464 close->put_file = NULL;
3465 io_put_req(req);
1a417f4e 3466 return 0;
b5dba59e
JA
3467}
3468
3529d8c2 3469static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5d17b4a4
JA
3470{
3471 struct io_ring_ctx *ctx = req->ctx;
5d17b4a4
JA
3472
3473 if (!req->file)
3474 return -EBADF;
5d17b4a4
JA
3475
3476 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3477 return -EINVAL;
3478 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
3479 return -EINVAL;
3480
8ed8d3c3
JA
3481 req->sync.off = READ_ONCE(sqe->off);
3482 req->sync.len = READ_ONCE(sqe->len);
3483 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
8ed8d3c3
JA
3484 return 0;
3485}
3486
ac45abc0 3487static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
8ed8d3c3 3488{
8ed8d3c3
JA
3489 int ret;
3490
ac45abc0
PB
3491 /* sync_file_range always requires a blocking context */
3492 if (force_nonblock)
3493 return -EAGAIN;
3494
9adbd45d 3495 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
8ed8d3c3
JA
3496 req->sync.flags);
3497 if (ret < 0)
3498 req_set_fail_links(req);
3499 io_cqring_add_event(req, ret);
014db007 3500 io_put_req(req);
5d17b4a4
JA
3501 return 0;
3502}
3503
469956e8 3504#if defined(CONFIG_NET)
02d27d89
PB
3505static int io_setup_async_msg(struct io_kiocb *req,
3506 struct io_async_msghdr *kmsg)
3507{
3508 if (req->io)
3509 return -EAGAIN;
3510 if (io_alloc_async_ctx(req)) {
3511 if (kmsg->iov != kmsg->fast_iov)
3512 kfree(kmsg->iov);
3513 return -ENOMEM;
3514 }
3515 req->flags |= REQ_F_NEED_CLEANUP;
3516 memcpy(&req->io->msg, kmsg, sizeof(*kmsg));
3517 return -EAGAIN;
3518}
3519
3529d8c2 3520static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
03b1230c 3521{
e47293fd 3522 struct io_sr_msg *sr = &req->sr_msg;
3529d8c2 3523 struct io_async_ctx *io = req->io;
99bc4c38 3524 int ret;
03b1230c 3525
d2b6f48b
PB
3526 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3527 return -EINVAL;
3528
e47293fd
JA
3529 sr->msg_flags = READ_ONCE(sqe->msg_flags);
3530 sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
fddaface 3531 sr->len = READ_ONCE(sqe->len);
3529d8c2 3532
d8768362
JA
3533#ifdef CONFIG_COMPAT
3534 if (req->ctx->compat)
3535 sr->msg_flags |= MSG_CMSG_COMPAT;
3536#endif
3537
fddaface 3538 if (!io || req->opcode == IORING_OP_SEND)
3529d8c2 3539 return 0;
5f798bea
PB
3540 /* iovec is already imported */
3541 if (req->flags & REQ_F_NEED_CLEANUP)
3542 return 0;
3529d8c2 3543
d9688565 3544 io->msg.iov = io->msg.fast_iov;
99bc4c38 3545 ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
e47293fd 3546 &io->msg.iov);
99bc4c38
PB
3547 if (!ret)
3548 req->flags |= REQ_F_NEED_CLEANUP;
3549 return ret;
03b1230c
JA
3550}
3551
014db007 3552static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
aa1fa28f 3553{
0b416c3e 3554 struct io_async_msghdr *kmsg = NULL;
0fa03c62
JA
3555 struct socket *sock;
3556 int ret;
3557
0fa03c62
JA
3558 sock = sock_from_file(req->file, &ret);
3559 if (sock) {
b7bb4f7d 3560 struct io_async_ctx io;
0fa03c62
JA
3561 unsigned flags;
3562
03b1230c 3563 if (req->io) {
0b416c3e 3564 kmsg = &req->io->msg;
b537916c 3565 kmsg->msg.msg_name = &req->io->msg.addr;
0b416c3e
JA
3566 /* if iov is set, it's allocated already */
3567 if (!kmsg->iov)
3568 kmsg->iov = kmsg->fast_iov;
3569 kmsg->msg.msg_iter.iov = kmsg->iov;
03b1230c 3570 } else {
3529d8c2
JA
3571 struct io_sr_msg *sr = &req->sr_msg;
3572
0b416c3e 3573 kmsg = &io.msg;
b537916c 3574 kmsg->msg.msg_name = &io.msg.addr;
3529d8c2
JA
3575
3576 io.msg.iov = io.msg.fast_iov;
3577 ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
3578 sr->msg_flags, &io.msg.iov);
03b1230c 3579 if (ret)
3529d8c2 3580 return ret;
03b1230c 3581 }
0fa03c62 3582
e47293fd
JA
3583 flags = req->sr_msg.msg_flags;
3584 if (flags & MSG_DONTWAIT)
3585 req->flags |= REQ_F_NOWAIT;
3586 else if (force_nonblock)
3587 flags |= MSG_DONTWAIT;
3588
0b416c3e 3589 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
02d27d89
PB
3590 if (force_nonblock && ret == -EAGAIN)
3591 return io_setup_async_msg(req, kmsg);
441cdbd5
JA
3592 if (ret == -ERESTARTSYS)
3593 ret = -EINTR;
0fa03c62
JA
3594 }
3595
1e95081c 3596 if (kmsg && kmsg->iov != kmsg->fast_iov)
0b416c3e 3597 kfree(kmsg->iov);
99bc4c38 3598 req->flags &= ~REQ_F_NEED_CLEANUP;
78e19bbe 3599 io_cqring_add_event(req, ret);
4e88d6e7
JA
3600 if (ret < 0)
3601 req_set_fail_links(req);
014db007 3602 io_put_req(req);
5d17b4a4 3603 return 0;
03b1230c 3604}
aa1fa28f 3605
014db007 3606static int io_send(struct io_kiocb *req, bool force_nonblock)
fddaface 3607{
fddaface
JA
3608 struct socket *sock;
3609 int ret;
3610
fddaface
JA
3611 sock = sock_from_file(req->file, &ret);
3612 if (sock) {
3613 struct io_sr_msg *sr = &req->sr_msg;
3614 struct msghdr msg;
3615 struct iovec iov;
3616 unsigned flags;
3617
3618 ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
3619 &msg.msg_iter);
3620 if (ret)
3621 return ret;
3622
3623 msg.msg_name = NULL;
3624 msg.msg_control = NULL;
3625 msg.msg_controllen = 0;
3626 msg.msg_namelen = 0;
3627
3628 flags = req->sr_msg.msg_flags;
3629 if (flags & MSG_DONTWAIT)
3630 req->flags |= REQ_F_NOWAIT;
3631 else if (force_nonblock)
3632 flags |= MSG_DONTWAIT;
3633
0b7b21e4
JA
3634 msg.msg_flags = flags;
3635 ret = sock_sendmsg(sock, &msg);
fddaface
JA
3636 if (force_nonblock && ret == -EAGAIN)
3637 return -EAGAIN;
3638 if (ret == -ERESTARTSYS)
3639 ret = -EINTR;
3640 }
3641
3642 io_cqring_add_event(req, ret);
3643 if (ret < 0)
3644 req_set_fail_links(req);
014db007 3645 io_put_req(req);
fddaface 3646 return 0;
fddaface
JA
3647}
3648
52de1fe1
JA
3649static int __io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
3650{
3651 struct io_sr_msg *sr = &req->sr_msg;
3652 struct iovec __user *uiov;
3653 size_t iov_len;
3654 int ret;
3655
3656 ret = __copy_msghdr_from_user(&io->msg.msg, sr->msg, &io->msg.uaddr,
3657 &uiov, &iov_len);
3658 if (ret)
3659 return ret;
3660
3661 if (req->flags & REQ_F_BUFFER_SELECT) {
3662 if (iov_len > 1)
3663 return -EINVAL;
3664 if (copy_from_user(io->msg.iov, uiov, sizeof(*uiov)))
3665 return -EFAULT;
3666 sr->len = io->msg.iov[0].iov_len;
3667 iov_iter_init(&io->msg.msg.msg_iter, READ, io->msg.iov, 1,
3668 sr->len);
3669 io->msg.iov = NULL;
3670 } else {
3671 ret = import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
3672 &io->msg.iov, &io->msg.msg.msg_iter);
3673 if (ret > 0)
3674 ret = 0;
3675 }
3676
3677 return ret;
3678}
3679
3680#ifdef CONFIG_COMPAT
3681static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
3682 struct io_async_ctx *io)
3683{
3684 struct compat_msghdr __user *msg_compat;
3685 struct io_sr_msg *sr = &req->sr_msg;
3686 struct compat_iovec __user *uiov;
3687 compat_uptr_t ptr;
3688 compat_size_t len;
3689 int ret;
3690
3691 msg_compat = (struct compat_msghdr __user *) sr->msg;
3692 ret = __get_compat_msghdr(&io->msg.msg, msg_compat, &io->msg.uaddr,
3693 &ptr, &len);
3694 if (ret)
3695 return ret;
3696
3697 uiov = compat_ptr(ptr);
3698 if (req->flags & REQ_F_BUFFER_SELECT) {
3699 compat_ssize_t clen;
3700
3701 if (len > 1)
3702 return -EINVAL;
3703 if (!access_ok(uiov, sizeof(*uiov)))
3704 return -EFAULT;
3705 if (__get_user(clen, &uiov->iov_len))
3706 return -EFAULT;
3707 if (clen < 0)
3708 return -EINVAL;
3709 sr->len = io->msg.iov[0].iov_len;
3710 io->msg.iov = NULL;
3711 } else {
3712 ret = compat_import_iovec(READ, uiov, len, UIO_FASTIOV,
3713 &io->msg.iov,
3714 &io->msg.msg.msg_iter);
3715 if (ret < 0)
3716 return ret;
3717 }
3718
3719 return 0;
3720}
3721#endif
3722
3723static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
3724{
3725 io->msg.iov = io->msg.fast_iov;
3726
3727#ifdef CONFIG_COMPAT
3728 if (req->ctx->compat)
3729 return __io_compat_recvmsg_copy_hdr(req, io);
fddaface 3730#endif
52de1fe1
JA
3731
3732 return __io_recvmsg_copy_hdr(req, io);
3733}
3734
bcda7baa
JA
3735static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
3736 int *cflags, bool needs_lock)
3737{
3738 struct io_sr_msg *sr = &req->sr_msg;
3739 struct io_buffer *kbuf;
3740
3741 if (!(req->flags & REQ_F_BUFFER_SELECT))
3742 return NULL;
3743
3744 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
3745 if (IS_ERR(kbuf))
3746 return kbuf;
3747
3748 sr->kbuf = kbuf;
3749 req->flags |= REQ_F_BUFFER_SELECTED;
3750
3751 *cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
3752 *cflags |= IORING_CQE_F_BUFFER;
3753 return kbuf;
fddaface
JA
3754}
3755
3529d8c2
JA
3756static int io_recvmsg_prep(struct io_kiocb *req,
3757 const struct io_uring_sqe *sqe)
aa1fa28f 3758{
e47293fd 3759 struct io_sr_msg *sr = &req->sr_msg;
3529d8c2 3760 struct io_async_ctx *io = req->io;
99bc4c38 3761 int ret;
3529d8c2 3762
d2b6f48b
PB
3763 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3764 return -EINVAL;
3765
3529d8c2
JA
3766 sr->msg_flags = READ_ONCE(sqe->msg_flags);
3767 sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
0b7b21e4 3768 sr->len = READ_ONCE(sqe->len);
bcda7baa 3769 sr->bgid = READ_ONCE(sqe->buf_group);
06b76d44 3770
d8768362
JA
3771#ifdef CONFIG_COMPAT
3772 if (req->ctx->compat)
3773 sr->msg_flags |= MSG_CMSG_COMPAT;
3774#endif
3775
fddaface 3776 if (!io || req->opcode == IORING_OP_RECV)
06b76d44 3777 return 0;
5f798bea
PB
3778 /* iovec is already imported */
3779 if (req->flags & REQ_F_NEED_CLEANUP)
3780 return 0;
03b1230c 3781
52de1fe1 3782 ret = io_recvmsg_copy_hdr(req, io);
99bc4c38
PB
3783 if (!ret)
3784 req->flags |= REQ_F_NEED_CLEANUP;
3785 return ret;
aa1fa28f
JA
3786}
3787
014db007 3788static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
aa1fa28f 3789{
0b416c3e 3790 struct io_async_msghdr *kmsg = NULL;
03b1230c 3791 struct socket *sock;
52de1fe1 3792 int ret, cflags = 0;
03b1230c 3793
03b1230c
JA
3794 sock = sock_from_file(req->file, &ret);
3795 if (sock) {
52de1fe1 3796 struct io_buffer *kbuf;
b7bb4f7d 3797 struct io_async_ctx io;
03b1230c
JA
3798 unsigned flags;
3799
03b1230c 3800 if (req->io) {
0b416c3e 3801 kmsg = &req->io->msg;
b537916c 3802 kmsg->msg.msg_name = &req->io->msg.addr;
0b416c3e
JA
3803 /* if iov is set, it's allocated already */
3804 if (!kmsg->iov)
3805 kmsg->iov = kmsg->fast_iov;
3806 kmsg->msg.msg_iter.iov = kmsg->iov;
03b1230c 3807 } else {
0b416c3e 3808 kmsg = &io.msg;
b537916c 3809 kmsg->msg.msg_name = &io.msg.addr;
3529d8c2 3810
52de1fe1 3811 ret = io_recvmsg_copy_hdr(req, &io);
03b1230c 3812 if (ret)
3529d8c2 3813 return ret;
03b1230c
JA
3814 }
3815
52de1fe1
JA
3816 kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
3817 if (IS_ERR(kbuf)) {
3818 return PTR_ERR(kbuf);
3819 } else if (kbuf) {
3820 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3821 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
3822 1, req->sr_msg.len);
3823 }
3824
e47293fd
JA
3825 flags = req->sr_msg.msg_flags;
3826 if (flags & MSG_DONTWAIT)
3827 req->flags |= REQ_F_NOWAIT;
3828 else if (force_nonblock)
3829 flags |= MSG_DONTWAIT;
3830
3831 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
3832 kmsg->uaddr, flags);
02d27d89
PB
3833 if (force_nonblock && ret == -EAGAIN)
3834 return io_setup_async_msg(req, kmsg);
03b1230c
JA
3835 if (ret == -ERESTARTSYS)
3836 ret = -EINTR;
3837 }
3838
1e95081c 3839 if (kmsg && kmsg->iov != kmsg->fast_iov)
0b416c3e 3840 kfree(kmsg->iov);
99bc4c38 3841 req->flags &= ~REQ_F_NEED_CLEANUP;
52de1fe1 3842 __io_cqring_add_event(req, ret, cflags);
4e88d6e7
JA
3843 if (ret < 0)
3844 req_set_fail_links(req);
014db007 3845 io_put_req(req);
03b1230c 3846 return 0;
0fa03c62 3847}
5d17b4a4 3848
014db007 3849static int io_recv(struct io_kiocb *req, bool force_nonblock)
fddaface 3850{
bcda7baa 3851 struct io_buffer *kbuf = NULL;
fddaface 3852 struct socket *sock;
bcda7baa 3853 int ret, cflags = 0;
fddaface 3854
fddaface
JA
3855 sock = sock_from_file(req->file, &ret);
3856 if (sock) {
3857 struct io_sr_msg *sr = &req->sr_msg;
bcda7baa 3858 void __user *buf = sr->buf;
fddaface
JA
3859 struct msghdr msg;
3860 struct iovec iov;
3861 unsigned flags;
3862
bcda7baa
JA
3863 kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
3864 if (IS_ERR(kbuf))
3865 return PTR_ERR(kbuf);
3866 else if (kbuf)
3867 buf = u64_to_user_ptr(kbuf->addr);
3868
3869 ret = import_single_range(READ, buf, sr->len, &iov,
fddaface 3870 &msg.msg_iter);
bcda7baa
JA
3871 if (ret) {
3872 kfree(kbuf);
fddaface 3873 return ret;
bcda7baa 3874 }
fddaface 3875
bcda7baa 3876 req->flags |= REQ_F_NEED_CLEANUP;
fddaface
JA
3877 msg.msg_name = NULL;
3878 msg.msg_control = NULL;
3879 msg.msg_controllen = 0;
3880 msg.msg_namelen = 0;
3881 msg.msg_iocb = NULL;
3882 msg.msg_flags = 0;
3883
3884 flags = req->sr_msg.msg_flags;
3885 if (flags & MSG_DONTWAIT)
3886 req->flags |= REQ_F_NOWAIT;
3887 else if (force_nonblock)
3888 flags |= MSG_DONTWAIT;
3889
0b7b21e4 3890 ret = sock_recvmsg(sock, &msg, flags);
fddaface
JA
3891 if (force_nonblock && ret == -EAGAIN)
3892 return -EAGAIN;
3893 if (ret == -ERESTARTSYS)
3894 ret = -EINTR;
3895 }
3896
bcda7baa
JA
3897 kfree(kbuf);
3898 req->flags &= ~REQ_F_NEED_CLEANUP;
3899 __io_cqring_add_event(req, ret, cflags);
fddaface
JA
3900 if (ret < 0)
3901 req_set_fail_links(req);
014db007 3902 io_put_req(req);
fddaface 3903 return 0;
fddaface
JA
3904}
3905
3529d8c2 3906static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
17f2fe35 3907{
8ed8d3c3
JA
3908 struct io_accept *accept = &req->accept;
3909
17f2fe35
JA
3910 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3911 return -EINVAL;
8042d6ce 3912 if (sqe->ioprio || sqe->len || sqe->buf_index)
17f2fe35
JA
3913 return -EINVAL;
3914
d55e5f5b
JA
3915 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
3916 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
8ed8d3c3 3917 accept->flags = READ_ONCE(sqe->accept_flags);
09952e3e 3918 accept->nofile = rlimit(RLIMIT_NOFILE);
8ed8d3c3 3919 return 0;
8ed8d3c3 3920}
17f2fe35 3921
ac45abc0 3922static int io_accept(struct io_kiocb *req, bool force_nonblock)
8ed8d3c3
JA
3923{
3924 struct io_accept *accept = &req->accept;
ac45abc0 3925 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
8ed8d3c3
JA
3926 int ret;
3927
e697deed
JX
3928 if (req->file->f_flags & O_NONBLOCK)
3929 req->flags |= REQ_F_NOWAIT;
3930
8ed8d3c3 3931 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
09952e3e
JA
3932 accept->addr_len, accept->flags,
3933 accept->nofile);
8ed8d3c3 3934 if (ret == -EAGAIN && force_nonblock)
17f2fe35 3935 return -EAGAIN;
ac45abc0
PB
3936 if (ret < 0) {
3937 if (ret == -ERESTARTSYS)
3938 ret = -EINTR;
4e88d6e7 3939 req_set_fail_links(req);
ac45abc0 3940 }
78e19bbe 3941 io_cqring_add_event(req, ret);
014db007 3942 io_put_req(req);
17f2fe35 3943 return 0;
8ed8d3c3
JA
3944}
3945
3529d8c2 3946static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f499a021 3947{
3529d8c2
JA
3948 struct io_connect *conn = &req->connect;
3949 struct io_async_ctx *io = req->io;
f499a021 3950
3fbb51c1
JA
3951 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3952 return -EINVAL;
3953 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
3954 return -EINVAL;
3955
3529d8c2
JA
3956 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
3957 conn->addr_len = READ_ONCE(sqe->addr2);
3958
3959 if (!io)
3960 return 0;
3961
3962 return move_addr_to_kernel(conn->addr, conn->addr_len,
3fbb51c1 3963 &io->connect.address);
f499a021
JA
3964}
3965
014db007 3966static int io_connect(struct io_kiocb *req, bool force_nonblock)
f8e85cf2 3967{
f499a021 3968 struct io_async_ctx __io, *io;
f8e85cf2 3969 unsigned file_flags;
3fbb51c1 3970 int ret;
f8e85cf2 3971
f499a021
JA
3972 if (req->io) {
3973 io = req->io;
3974 } else {
3529d8c2
JA
3975 ret = move_addr_to_kernel(req->connect.addr,
3976 req->connect.addr_len,
3977 &__io.connect.address);
f499a021
JA
3978 if (ret)
3979 goto out;
3980 io = &__io;
3981 }
3982
3fbb51c1
JA
3983 file_flags = force_nonblock ? O_NONBLOCK : 0;
3984
3985 ret = __sys_connect_file(req->file, &io->connect.address,
3986 req->connect.addr_len, file_flags);
87f80d62 3987 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
b7bb4f7d
JA
3988 if (req->io)
3989 return -EAGAIN;
3990 if (io_alloc_async_ctx(req)) {
f499a021
JA
3991 ret = -ENOMEM;
3992 goto out;
3993 }
b7bb4f7d 3994 memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect));
f8e85cf2 3995 return -EAGAIN;
f499a021 3996 }
f8e85cf2
JA
3997 if (ret == -ERESTARTSYS)
3998 ret = -EINTR;
f499a021 3999out:
4e88d6e7
JA
4000 if (ret < 0)
4001 req_set_fail_links(req);
f8e85cf2 4002 io_cqring_add_event(req, ret);
014db007 4003 io_put_req(req);
f8e85cf2 4004 return 0;
469956e8
Y
4005}
4006#else /* !CONFIG_NET */
4007static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4008{
f8e85cf2 4009 return -EOPNOTSUPP;
f8e85cf2
JA
4010}
4011
469956e8
Y
4012static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
4013{
4014 return -EOPNOTSUPP;
4015}
4016
4017static int io_send(struct io_kiocb *req, bool force_nonblock)
4018{
4019 return -EOPNOTSUPP;
4020}
4021
4022static int io_recvmsg_prep(struct io_kiocb *req,
4023 const struct io_uring_sqe *sqe)
4024{
4025 return -EOPNOTSUPP;
4026}
4027
4028static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
4029{
4030 return -EOPNOTSUPP;
4031}
4032
4033static int io_recv(struct io_kiocb *req, bool force_nonblock)
4034{
4035 return -EOPNOTSUPP;
4036}
4037
4038static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4039{
4040 return -EOPNOTSUPP;
4041}
4042
4043static int io_accept(struct io_kiocb *req, bool force_nonblock)
4044{
4045 return -EOPNOTSUPP;
4046}
4047
4048static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4049{
4050 return -EOPNOTSUPP;
4051}
4052
4053static int io_connect(struct io_kiocb *req, bool force_nonblock)
4054{
f8e85cf2 4055 return -EOPNOTSUPP;
f8e85cf2 4056}
469956e8 4057#endif /* CONFIG_NET */
f8e85cf2 4058
d7718a9d
JA
4059struct io_poll_table {
4060 struct poll_table_struct pt;
4061 struct io_kiocb *req;
4062 int error;
4063};
4064
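/*
 * Common wake path for poll and async-poll: on a matching event, remove
 * the wait queue entry, store the result in the request, and queue
 * 'func' via task_work so completion runs in task context. If
 * task_work_add() fails because the task is exiting, mark the poll
 * canceled and punt the work to the io-wq manager task instead.
 */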
d7718a9d
JA
4065static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4066 __poll_t mask, task_work_func_t func)
4067{
4068 struct task_struct *tsk;
aa96bf8a 4069 int ret;
d7718a9d
JA
4070
4071 /* for instances that support it check for an event match first: */
4072 if (mask && !(mask & poll->events))
4073 return 0;
4074
4075 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4076
4077 list_del_init(&poll->wait.entry);
4078
4079 tsk = req->task;
4080 req->result = mask;
4081 init_task_work(&req->task_work, func);
4082 /*
e3aabf95
JA
4083 * If this fails, then the task is exiting. When a task exits, the
4084 * work gets canceled, so just cancel this request as well instead
4085 * of executing it. We can't safely execute it anyway, as we may not
 4086 * have the state needed for it.
d7718a9d 4087 */
aa96bf8a
JA
4088 ret = task_work_add(tsk, &req->task_work, true);
4089 if (unlikely(ret)) {
e3aabf95 4090 WRITE_ONCE(poll->canceled, true);
aa96bf8a
JA
4091 tsk = io_wq_get_task(req->ctx->io_wq);
4092 task_work_add(tsk, &req->task_work, true);
4093 }
d7718a9d
JA
4094 wake_up_process(tsk);
4095 return 1;
4096}
4097
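/*
 * Re-check readiness before completing from task_work: if no result was
 * recorded and the poll wasn't canceled, poll the file again. Returns
 * with ->completion_lock held either way; true means the request was
 * re-armed on the waitqueue and must not be completed yet.
 */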
74ce6ce4
JA
4098static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4099 __acquires(&req->ctx->completion_lock)
4100{
4101 struct io_ring_ctx *ctx = req->ctx;
4102
4103 if (!req->result && !READ_ONCE(poll->canceled)) {
4104 struct poll_table_struct pt = { ._key = poll->events };
4105
4106 req->result = vfs_poll(req->file, &pt) & poll->events;
4107 }
4108
4109 spin_lock_irq(&ctx->completion_lock);
4110 if (!req->result && !READ_ONCE(poll->canceled)) {
4111 add_wait_queue(poll->head, &poll->wait);
4112 return true;
4113 }
4114
4115 return false;
4116}
4117
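/*
 * Tear down the second poll entry that __io_queue_proc() may have hung
 * off req->io for files using multiple waitqueues, dropping the extra
 * request reference it took.
 */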
18bceab1
JA
4118static void io_poll_remove_double(struct io_kiocb *req)
4119{
4120 struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
4121
4122 lockdep_assert_held(&req->ctx->completion_lock);
4123
4124 if (poll && poll->head) {
4125 struct wait_queue_head *head = poll->head;
4126
4127 spin_lock(&head->lock);
4128 list_del_init(&poll->wait.entry);
4129 if (poll->wait.private)
4130 refcount_dec(&req->refs);
4131 poll->head = NULL;
4132 spin_unlock(&head->lock);
4133 }
4134}
4135
4136static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
4137{
4138 struct io_ring_ctx *ctx = req->ctx;
4139
4140 io_poll_remove_double(req);
4141 req->poll.done = true;
4142 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
4143 io_commit_cqring(ctx);
4144}
4145
4146static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
4147{
4148 struct io_ring_ctx *ctx = req->ctx;
4149
4150 if (io_poll_rewait(req, &req->poll)) {
4151 spin_unlock_irq(&ctx->completion_lock);
4152 return;
4153 }
4154
4155 hash_del(&req->hash_node);
4156 io_poll_complete(req, req->result, 0);
4157 req->flags |= REQ_F_COMP_LOCKED;
4158 io_put_req_find_next(req, nxt);
4159 spin_unlock_irq(&ctx->completion_lock);
4160
4161 io_cqring_ev_posted(ctx);
4162}
4163
4164static void io_poll_task_func(struct callback_head *cb)
4165{
4166 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4167 struct io_kiocb *nxt = NULL;
4168
4169 io_poll_task_handler(req, &nxt);
4170 if (nxt) {
4171 struct io_ring_ctx *ctx = nxt->ctx;
4172
4173 mutex_lock(&ctx->uring_lock);
4174 __io_queue_sqe(nxt, NULL);
4175 mutex_unlock(&ctx->uring_lock);
4176 }
4177}
4178
4179static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4180 int sync, void *key)
4181{
4182 struct io_kiocb *req = wait->private;
4183 struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
4184 __poll_t mask = key_to_poll(key);
4185
4186 /* for instances that support it check for an event match first: */
4187 if (mask && !(mask & poll->events))
4188 return 0;
4189
4190 if (req->poll.head) {
4191 bool done;
4192
4193 spin_lock(&req->poll.head->lock);
4194 done = list_empty(&req->poll.wait.entry);
4195 if (!done)
4196 list_del_init(&req->poll.wait.entry);
4197 spin_unlock(&req->poll.head->lock);
4198 if (!done)
4199 __io_async_wake(req, poll, mask, io_poll_task_func);
4200 }
4201 refcount_dec(&req->refs);
4202 return 1;
4203}
4204
4205static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
4206 wait_queue_func_t wake_func)
4207{
4208 poll->head = NULL;
4209 poll->done = false;
4210 poll->canceled = false;
4211 poll->events = events;
4212 INIT_LIST_HEAD(&poll->wait.entry);
4213 init_waitqueue_func_entry(&poll->wait, wake_func);
4214}
4215
4216static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
4217 struct wait_queue_head *head)
4218{
4219 struct io_kiocb *req = pt->req;
4220
4221 /*
4222 * If poll->head is already set, it's because the file being polled
 4223 * uses multiple waitqueues for poll handling (e.g. one for read, one
 4224 * for write). Set up a separate io_poll_iocb if this happens.
4225 */
4226 if (unlikely(poll->head)) {
4227 /* already have a 2nd entry, fail a third attempt */
4228 if (req->io) {
4229 pt->error = -EINVAL;
4230 return;
4231 }
4232 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
4233 if (!poll) {
4234 pt->error = -ENOMEM;
4235 return;
4236 }
4237 io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
4238 refcount_inc(&req->refs);
4239 poll->wait.private = req;
4240 req->io = (void *) poll;
4241 }
4242
4243 pt->error = 0;
4244 poll->head = head;
4245 add_wait_queue(head, &poll->wait);
4246}
4247
4248static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
4249 struct poll_table_struct *p)
4250{
4251 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4252
4253 __io_queue_proc(&pt->req->apoll->poll, pt, head);
4254}
4255
d7718a9d
JA
4256static void io_async_task_func(struct callback_head *cb)
4257{
4258 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4259 struct async_poll *apoll = req->apoll;
4260 struct io_ring_ctx *ctx = req->ctx;
31067255 4261 bool canceled = false;
d7718a9d
JA
4262
4263 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
4264
74ce6ce4 4265 if (io_poll_rewait(req, &apoll->poll)) {
d7718a9d 4266 spin_unlock_irq(&ctx->completion_lock);
74ce6ce4 4267 return;
d7718a9d
JA
4268 }
4269
31067255
JA
4270 /* If req is still hashed, it cannot have been canceled. Don't check. */
4271 if (hash_hashed(&req->hash_node)) {
74ce6ce4 4272 hash_del(&req->hash_node);
31067255
JA
4273 } else {
4274 canceled = READ_ONCE(apoll->poll.canceled);
4275 if (canceled) {
4276 io_cqring_fill_event(req, -ECANCELED);
4277 io_commit_cqring(ctx);
4278 }
2bae047e
JA
4279 }
4280
74ce6ce4
JA
4281 spin_unlock_irq(&ctx->completion_lock);
4282
44575a67 4287 /* restore ->work in case we need to retry */
405a5d2b
XW
4284 if (req->flags & REQ_F_WORK_INITIALIZED)
4285 memcpy(&req->work, &apoll->work, sizeof(req->work));
31067255 4286 kfree(apoll);
44575a67 4287
31067255
JA
4288 if (!canceled) {
4289 __set_current_state(TASK_RUNNING);
4290 mutex_lock(&ctx->uring_lock);
4291 __io_queue_sqe(req, NULL);
4292 mutex_unlock(&ctx->uring_lock);
4293 } else {
2bae047e
JA
4294 io_cqring_ev_posted(ctx);
4295 req_set_fail_links(req);
44575a67 4296 io_double_put_req(req);
2bae047e 4297 }
d7718a9d
JA
4298}
4299
4300static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4301 void *key)
4302{
4303 struct io_kiocb *req = wait->private;
4304 struct io_poll_iocb *poll = &req->apoll->poll;
4305
4306 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
4307 key_to_poll(key));
4308
4309 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
4310}
4311
4312static void io_poll_req_insert(struct io_kiocb *req)
4313{
4314 struct io_ring_ctx *ctx = req->ctx;
4315 struct hlist_head *list;
4316
4317 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
4318 hlist_add_head(&req->hash_node, list);
4319}
4320
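/*
 * Arm a poll handler: initialize the iocb, run vfs_poll() with our queue
 * proc so the wait entry is registered, then resolve the races under
 * ->completion_lock. A non-zero return means the file was already ready
 * and nothing is left queued; zero means the request was hashed for
 * later lookup/cancellation. ->completion_lock is held on return.
 */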
4321static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
4322 struct io_poll_iocb *poll,
4323 struct io_poll_table *ipt, __poll_t mask,
4324 wait_queue_func_t wake_func)
4325 __acquires(&ctx->completion_lock)
4326{
4327 struct io_ring_ctx *ctx = req->ctx;
4328 bool cancel = false;
4329
4330 poll->file = req->file;
18bceab1
JA
4331 io_init_poll_iocb(poll, mask, wake_func);
4332 poll->wait.private = req;
d7718a9d
JA
4333
4334 ipt->pt._key = mask;
4335 ipt->req = req;
4336 ipt->error = -EINVAL;
4337
d7718a9d
JA
4338 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
4339
4340 spin_lock_irq(&ctx->completion_lock);
4341 if (likely(poll->head)) {
4342 spin_lock(&poll->head->lock);
4343 if (unlikely(list_empty(&poll->wait.entry))) {
4344 if (ipt->error)
4345 cancel = true;
4346 ipt->error = 0;
4347 mask = 0;
4348 }
4349 if (mask || ipt->error)
4350 list_del_init(&poll->wait.entry);
4351 else if (cancel)
4352 WRITE_ONCE(poll->canceled, true);
4353 else if (!poll->done) /* actually waiting for an event */
4354 io_poll_req_insert(req);
4355 spin_unlock(&poll->head->lock);
4356 }
4357
4358 return mask;
4359}
4360
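/*
 * Fast poll for requests that would otherwise be punted to io-wq: if the
 * file supports polling and the opcode declares pollin/pollout, arm an
 * internal poll (async_poll) so the request is retried from task_work
 * when the file becomes ready, instead of blocking a worker thread.
 */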
4361static bool io_arm_poll_handler(struct io_kiocb *req)
4362{
4363 const struct io_op_def *def = &io_op_defs[req->opcode];
4364 struct io_ring_ctx *ctx = req->ctx;
4365 struct async_poll *apoll;
4366 struct io_poll_table ipt;
4367 __poll_t mask, ret;
18bceab1 4368 bool had_io;
d7718a9d
JA
4369
4370 if (!req->file || !file_can_poll(req->file))
4371 return false;
4372 if (req->flags & (REQ_F_MUST_PUNT | REQ_F_POLLED))
4373 return false;
4374 if (!def->pollin && !def->pollout)
4375 return false;
4376
4377 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
4378 if (unlikely(!apoll))
4379 return false;
4380
4381 req->flags |= REQ_F_POLLED;
405a5d2b
XW
4382 if (req->flags & REQ_F_WORK_INITIALIZED)
4383 memcpy(&apoll->work, &req->work, sizeof(req->work));
18bceab1 4384 had_io = req->io != NULL;
d7718a9d 4385
4dd2824d 4386 io_get_req_task(req);
d7718a9d
JA
4387 req->apoll = apoll;
4388 INIT_HLIST_NODE(&req->hash_node);
4389
8755d97a 4390 mask = 0;
d7718a9d 4391 if (def->pollin)
8755d97a 4392 mask |= POLLIN | POLLRDNORM;
d7718a9d
JA
4393 if (def->pollout)
4394 mask |= POLLOUT | POLLWRNORM;
4395 mask |= POLLERR | POLLPRI;
4396
4397 ipt.pt._qproc = io_async_queue_proc;
4398
4399 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
4400 io_async_wake);
4401 if (ret) {
4402 ipt.error = 0;
18bceab1
JA
4403 /* only remove double add if we did it here */
4404 if (!had_io)
4405 io_poll_remove_double(req);
d7718a9d 4406 spin_unlock_irq(&ctx->completion_lock);
405a5d2b
XW
4407 if (req->flags & REQ_F_WORK_INITIALIZED)
4408 memcpy(&req->work, &apoll->work, sizeof(req->work));
d7718a9d
JA
4409 kfree(apoll);
4410 return false;
4411 }
4412 spin_unlock_irq(&ctx->completion_lock);
4413 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
4414 apoll->poll.events);
4415 return true;
4416}
4417
4418static bool __io_poll_remove_one(struct io_kiocb *req,
4419 struct io_poll_iocb *poll)
221c5eb2 4420{
b41e9852 4421 bool do_complete = false;
221c5eb2
JA
4422
4423 spin_lock(&poll->head->lock);
4424 WRITE_ONCE(poll->canceled, true);
392edb45
JA
4425 if (!list_empty(&poll->wait.entry)) {
4426 list_del_init(&poll->wait.entry);
b41e9852 4427 do_complete = true;
221c5eb2
JA
4428 }
4429 spin_unlock(&poll->head->lock);
3bfa5bcb 4430 hash_del(&req->hash_node);
d7718a9d
JA
4431 return do_complete;
4432}
4433
4434static bool io_poll_remove_one(struct io_kiocb *req)
4435{
4436 bool do_complete;
4437
4438 if (req->opcode == IORING_OP_POLL_ADD) {
18bceab1 4439 io_poll_remove_double(req);
d7718a9d
JA
4440 do_complete = __io_poll_remove_one(req, &req->poll);
4441 } else {
3bfa5bcb
JA
4442 struct async_poll *apoll = req->apoll;
4443
d7718a9d 4444 /* non-poll requests have submit ref still */
3bfa5bcb
JA
4445 do_complete = __io_poll_remove_one(req, &apoll->poll);
4446 if (do_complete) {
d7718a9d 4447 io_put_req(req);
3bfa5bcb
JA
4448 /*
4449 * restore ->work because we will call
4450 * io_req_work_drop_env below when dropping the
4451 * final reference.
4452 */
405a5d2b
XW
4453 if (req->flags & REQ_F_WORK_INITIALIZED)
4454 memcpy(&req->work, &apoll->work,
4455 sizeof(req->work));
3bfa5bcb
JA
4456 kfree(apoll);
4457 }
b1f573bd
XW
4458 }
4459
b41e9852
JA
4460 if (do_complete) {
4461 io_cqring_fill_event(req, -ECANCELED);
4462 io_commit_cqring(req->ctx);
4463 req->flags |= REQ_F_COMP_LOCKED;
4464 io_put_req(req);
4465 }
4466
4467 return do_complete;
221c5eb2
JA
4468}
4469
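/*
 * Cancel every pending poll request in the cancel hash (e.g. at ring
 * teardown), and signal posted completions once if anything was removed.
 */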
4470static void io_poll_remove_all(struct io_ring_ctx *ctx)
4471{
78076bb6 4472 struct hlist_node *tmp;
221c5eb2 4473 struct io_kiocb *req;
8e2e1faf 4474 int posted = 0, i;
221c5eb2
JA
4475
4476 spin_lock_irq(&ctx->completion_lock);
78076bb6
JA
4477 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
4478 struct hlist_head *list;
4479
4480 list = &ctx->cancel_hash[i];
4481 hlist_for_each_entry_safe(req, tmp, list, hash_node)
8e2e1faf 4482 posted += io_poll_remove_one(req);
221c5eb2
JA
4483 }
4484 spin_unlock_irq(&ctx->completion_lock);
b41e9852 4485
8e2e1faf
JA
4486 if (posted)
4487 io_cqring_ev_posted(ctx);
221c5eb2
JA
4488}
4489
47f46768
JA
4490static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
4491{
78076bb6 4492 struct hlist_head *list;
47f46768
JA
4493 struct io_kiocb *req;
4494
78076bb6
JA
4495 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
4496 hlist_for_each_entry(req, list, hash_node) {
b41e9852
JA
4497 if (sqe_addr != req->user_data)
4498 continue;
4499 if (io_poll_remove_one(req))
eac406c6 4500 return 0;
b41e9852 4501 return -EALREADY;
47f46768
JA
4502 }
4503
4504 return -ENOENT;
4505}
4506
3529d8c2
JA
4507static int io_poll_remove_prep(struct io_kiocb *req,
4508 const struct io_uring_sqe *sqe)
0969e783 4509{
0969e783
JA
4510 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4511 return -EINVAL;
4512 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
4513 sqe->poll_events)
4514 return -EINVAL;
4515
4516 req->poll.addr = READ_ONCE(sqe->addr);
0969e783
JA
4517 return 0;
4518}
4519
221c5eb2
JA
4520/*
4521 * Find a running poll command that matches one specified in sqe->addr,
4522 * and remove it if found.
4523 */
fc4df999 4524static int io_poll_remove(struct io_kiocb *req)
221c5eb2
JA
4525{
4526 struct io_ring_ctx *ctx = req->ctx;
0969e783 4527 u64 addr;
47f46768 4528 int ret;
221c5eb2 4529
0969e783 4530 addr = req->poll.addr;
221c5eb2 4531 spin_lock_irq(&ctx->completion_lock);
0969e783 4532 ret = io_poll_cancel(ctx, addr);
221c5eb2
JA
4533 spin_unlock_irq(&ctx->completion_lock);
4534
78e19bbe 4535 io_cqring_add_event(req, ret);
4e88d6e7
JA
4536 if (ret < 0)
4537 req_set_fail_links(req);
e65ef56d 4538 io_put_req(req);
221c5eb2
JA
4539 return 0;
4540}
4541
221c5eb2
JA
4542static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4543 void *key)
4544{
c2f2eb7d
JA
4545 struct io_kiocb *req = wait->private;
4546 struct io_poll_iocb *poll = &req->poll;
221c5eb2 4547
d7718a9d 4548 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
221c5eb2
JA
4549}
4550
221c5eb2
JA
4551static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
4552 struct poll_table_struct *p)
4553{
4554 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4555
d7718a9d 4556 __io_queue_proc(&pt->req->poll, pt, head);
eac406c6
JA
4557}
4558
3529d8c2 4559static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
4560{
4561 struct io_poll_iocb *poll = &req->poll;
221c5eb2 4562 u16 events;
221c5eb2
JA
4563
4564 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4565 return -EINVAL;
4566 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
4567 return -EINVAL;
09bb8394
JA
4568 if (!poll->file)
4569 return -EBADF;
221c5eb2 4570
221c5eb2
JA
4571 events = READ_ONCE(sqe->poll_events);
4572 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
b41e9852 4573
4dd2824d 4574 io_get_req_task(req);
0969e783
JA
4575 return 0;
4576}
4577
014db007 4578static int io_poll_add(struct io_kiocb *req)
0969e783
JA
4579{
4580 struct io_poll_iocb *poll = &req->poll;
4581 struct io_ring_ctx *ctx = req->ctx;
4582 struct io_poll_table ipt;
0969e783 4583 __poll_t mask;
0969e783 4584
78076bb6 4585 INIT_HLIST_NODE(&req->hash_node);
36703247 4586 INIT_LIST_HEAD(&req->list);
d7718a9d 4587 ipt.pt._qproc = io_poll_queue_proc;
36703247 4588
d7718a9d
JA
4589 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
4590 io_poll_wake);
221c5eb2 4591
8c838788 4592 if (mask) { /* no async, we'd stolen it */
221c5eb2 4593 ipt.error = 0;
b0dd8a41 4594 io_poll_complete(req, mask, 0);
221c5eb2 4595 }
221c5eb2
JA
4596 spin_unlock_irq(&ctx->completion_lock);
4597
8c838788
JA
4598 if (mask) {
4599 io_cqring_ev_posted(ctx);
014db007 4600 io_put_req(req);
221c5eb2 4601 }
8c838788 4602 return ipt.error;
221c5eb2
JA
4603}
4604
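/*
 * hrtimer callback for IORING_OP_TIMEOUT: the timer fired before enough
 * completions arrived, so drop the request from the timeout list and
 * complete it with -ETIME.
 */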
5262f567
JA
4605static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
4606{
ad8a48ac
JA
4607 struct io_timeout_data *data = container_of(timer,
4608 struct io_timeout_data, timer);
4609 struct io_kiocb *req = data->req;
4610 struct io_ring_ctx *ctx = req->ctx;
5262f567
JA
4611 unsigned long flags;
4612
5262f567
JA
4613 atomic_inc(&ctx->cq_timeouts);
4614
4615 spin_lock_irqsave(&ctx->completion_lock, flags);
ef03681a 4616 /*
11365043
JA
4617 * We could be racing with timeout deletion. If the list is empty,
4618 * then timeout lookup already found it and will be handling it.
ef03681a 4619 */
bfe68a22 4620 if (!list_empty(&req->list))
11365043 4621 list_del_init(&req->list);
5262f567 4622
78e19bbe 4623 io_cqring_fill_event(req, -ETIME);
5262f567
JA
4624 io_commit_cqring(ctx);
4625 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4626
4627 io_cqring_ev_posted(ctx);
4e88d6e7 4628 req_set_fail_links(req);
5262f567
JA
4629 io_put_req(req);
4630 return HRTIMER_NORESTART;
4631}
4632
47f46768
JA
4633static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
4634{
4635 struct io_kiocb *req;
4636 int ret = -ENOENT;
4637
4638 list_for_each_entry(req, &ctx->timeout_list, list) {
4639 if (user_data == req->user_data) {
4640 list_del_init(&req->list);
4641 ret = 0;
4642 break;
4643 }
4644 }
4645
4646 if (ret == -ENOENT)
4647 return ret;
4648
2d28390a 4649 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
47f46768
JA
4650 if (ret == -1)
4651 return -EALREADY;
4652
4e88d6e7 4653 req_set_fail_links(req);
47f46768
JA
4654 io_cqring_fill_event(req, -ECANCELED);
4655 io_put_req(req);
4656 return 0;
4657}
4658
3529d8c2
JA
4659static int io_timeout_remove_prep(struct io_kiocb *req,
4660 const struct io_uring_sqe *sqe)
b29472ee 4661{
b29472ee
JA
4662 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4663 return -EINVAL;
4664 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
4665 return -EINVAL;
4666
4667 req->timeout.addr = READ_ONCE(sqe->addr);
4668 req->timeout.flags = READ_ONCE(sqe->timeout_flags);
4669 if (req->timeout.flags)
4670 return -EINVAL;
4671
b29472ee
JA
4672 return 0;
4673}
4674
11365043
JA
4675/*
4676 * Remove or update an existing timeout command
4677 */
fc4df999 4678static int io_timeout_remove(struct io_kiocb *req)
11365043
JA
4679{
4680 struct io_ring_ctx *ctx = req->ctx;
47f46768 4681 int ret;
11365043 4682
11365043 4683 spin_lock_irq(&ctx->completion_lock);
b29472ee 4684 ret = io_timeout_cancel(ctx, req->timeout.addr);
11365043 4685
47f46768 4686 io_cqring_fill_event(req, ret);
11365043
JA
4687 io_commit_cqring(ctx);
4688 spin_unlock_irq(&ctx->completion_lock);
5262f567 4689 io_cqring_ev_posted(ctx);
4e88d6e7
JA
4690 if (ret < 0)
4691 req_set_fail_links(req);
ec9c02ad 4692 io_put_req(req);
11365043 4693 return 0;
5262f567
JA
4694}
4695
3529d8c2 4696static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2d28390a 4697 bool is_timeout_link)
5262f567 4698{
ad8a48ac 4699 struct io_timeout_data *data;
a41525ab 4700 unsigned flags;
56080b02 4701 u32 off = READ_ONCE(sqe->off);
5262f567 4702
ad8a48ac 4703 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5262f567 4704 return -EINVAL;
ad8a48ac 4705 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
a41525ab 4706 return -EINVAL;
56080b02 4707 if (off && is_timeout_link)
2d28390a 4708 return -EINVAL;
a41525ab
JA
4709 flags = READ_ONCE(sqe->timeout_flags);
4710 if (flags & ~IORING_TIMEOUT_ABS)
5262f567 4711 return -EINVAL;
bdf20073 4712
bfe68a22 4713 req->timeout.off = off;
26a61679 4714
3529d8c2 4715 if (!req->io && io_alloc_async_ctx(req))
26a61679
JA
4716 return -ENOMEM;
4717
4718 data = &req->io->timeout;
ad8a48ac 4719 data->req = req;
ad8a48ac
JA
4720 req->flags |= REQ_F_TIMEOUT;
4721
4722 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
4723 return -EFAULT;
4724
11365043 4725 if (flags & IORING_TIMEOUT_ABS)
ad8a48ac 4726 data->mode = HRTIMER_MODE_ABS;
11365043 4727 else
ad8a48ac 4728 data->mode = HRTIMER_MODE_REL;
11365043 4729
ad8a48ac
JA
4730 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
4731 return 0;
4732}
4733
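/*
 * Queue a prepared timeout: a pure timeout (no completion count) goes at
 * the tail of the list, otherwise the entry is insertion-sorted by its
 * target completion sequence before the timer is started.
 */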
fc4df999 4734static int io_timeout(struct io_kiocb *req)
ad8a48ac 4735{
ad8a48ac 4736 struct io_ring_ctx *ctx = req->ctx;
bfe68a22 4737 struct io_timeout_data *data = &req->io->timeout;
ad8a48ac 4738 struct list_head *entry;
bfe68a22 4739 u32 tail, off = req->timeout.off;
ad8a48ac 4740
733f5c95 4741 spin_lock_irq(&ctx->completion_lock);
93bd25bb 4742
5262f567
JA
4743 /*
 4744 * sqe->off holds how many events need to occur for this
93bd25bb
JA
4745 * timeout event to be satisfied. If it isn't set, then this is
 4746 * a pure timeout request and the sequence isn't used.
5262f567 4747 */
bfe68a22 4748 if (!off) {
93bd25bb 4749 req->flags |= REQ_F_TIMEOUT_NOSEQ;
93bd25bb
JA
4750 entry = ctx->timeout_list.prev;
4751 goto add;
4752 }
5262f567 4753
bfe68a22
PB
4754 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
4755 req->timeout.target_seq = tail + off;
5262f567
JA
4756
4757 /*
4758 * Insertion sort, ensuring the first entry in the list is always
4759 * the one we need first.
4760 */
5262f567
JA
4761 list_for_each_prev(entry, &ctx->timeout_list) {
4762 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
5262f567 4763
93bd25bb
JA
4764 if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
4765 continue;
bfe68a22
PB
4766 /* nxt.seq is behind @tail, otherwise would've been completed */
4767 if (off >= nxt->timeout.target_seq - tail)
5262f567
JA
4768 break;
4769 }
93bd25bb 4770add:
5262f567 4771 list_add(&req->list, entry);
ad8a48ac
JA
4772 data->timer.function = io_timeout_fn;
4773 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5262f567 4774 spin_unlock_irq(&ctx->completion_lock);
5262f567
JA
4775 return 0;
4776}
5262f567 4777
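/*
 * Async cancel helpers: io_cancel_cb() matches an io-wq work item
 * against the user_data given to IORING_OP_ASYNC_CANCEL, and
 * io_async_cancel_one() maps the io-wq cancel result to
 * 0/-EALREADY/-ENOENT.
 */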
62755e35
JA
4778static bool io_cancel_cb(struct io_wq_work *work, void *data)
4779{
4780 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
4781
4782 return req->user_data == (unsigned long) data;
4783}
4784
e977d6d3 4785static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
62755e35 4786{
62755e35 4787 enum io_wq_cancel cancel_ret;
62755e35
JA
4788 int ret = 0;
4789
4f26bda1 4790 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
62755e35
JA
4791 switch (cancel_ret) {
4792 case IO_WQ_CANCEL_OK:
4793 ret = 0;
4794 break;
4795 case IO_WQ_CANCEL_RUNNING:
4796 ret = -EALREADY;
4797 break;
4798 case IO_WQ_CANCEL_NOTFOUND:
4799 ret = -ENOENT;
4800 break;
4801 }
4802
e977d6d3
JA
4803 return ret;
4804}
4805
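/*
 * Look for the target request in each place it may be parked, in order:
 * the io-wq queues, the timeout list, then the poll hash. A completion
 * is posted for the cancel request regardless of the outcome.
 */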
47f46768
JA
4806static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
4807 struct io_kiocb *req, __u64 sqe_addr,
014db007 4808 int success_ret)
47f46768
JA
4809{
4810 unsigned long flags;
4811 int ret;
4812
4813 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
4814 if (ret != -ENOENT) {
4815 spin_lock_irqsave(&ctx->completion_lock, flags);
4816 goto done;
4817 }
4818
4819 spin_lock_irqsave(&ctx->completion_lock, flags);
4820 ret = io_timeout_cancel(ctx, sqe_addr);
4821 if (ret != -ENOENT)
4822 goto done;
4823 ret = io_poll_cancel(ctx, sqe_addr);
4824done:
b0dd8a41
JA
4825 if (!ret)
4826 ret = success_ret;
47f46768
JA
4827 io_cqring_fill_event(req, ret);
4828 io_commit_cqring(ctx);
4829 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4830 io_cqring_ev_posted(ctx);
4831
4e88d6e7
JA
4832 if (ret < 0)
4833 req_set_fail_links(req);
014db007 4834 io_put_req(req);
47f46768
JA
4835}
4836
3529d8c2
JA
4837static int io_async_cancel_prep(struct io_kiocb *req,
4838 const struct io_uring_sqe *sqe)
e977d6d3 4839{
fbf23849 4840 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
e977d6d3
JA
4841 return -EINVAL;
4842 if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
4843 sqe->cancel_flags)
4844 return -EINVAL;
4845
fbf23849
JA
4846 req->cancel.addr = READ_ONCE(sqe->addr);
4847 return 0;
4848}
4849
014db007 4850static int io_async_cancel(struct io_kiocb *req)
fbf23849
JA
4851{
4852 struct io_ring_ctx *ctx = req->ctx;
fbf23849 4853
014db007 4854 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
5262f567
JA
4855 return 0;
4856}
4857
05f3fb3c
JA
4858static int io_files_update_prep(struct io_kiocb *req,
4859 const struct io_uring_sqe *sqe)
4860{
4861 if (sqe->flags || sqe->ioprio || sqe->rw_flags)
4862 return -EINVAL;
4863
4864 req->files_update.offset = READ_ONCE(sqe->off);
4865 req->files_update.nr_args = READ_ONCE(sqe->len);
4866 if (!req->files_update.nr_args)
4867 return -EINVAL;
4868 req->files_update.arg = READ_ONCE(sqe->addr);
4869 return 0;
4870}
4871
4872static int io_files_update(struct io_kiocb *req, bool force_nonblock)
fbf23849
JA
4873{
4874 struct io_ring_ctx *ctx = req->ctx;
05f3fb3c
JA
4875 struct io_uring_files_update up;
4876 int ret;
fbf23849 4877
f86cd20c 4878 if (force_nonblock)
05f3fb3c 4879 return -EAGAIN;
05f3fb3c
JA
4880
4881 up.offset = req->files_update.offset;
4882 up.fds = req->files_update.arg;
4883
4884 mutex_lock(&ctx->uring_lock);
4885 ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
4886 mutex_unlock(&ctx->uring_lock);
4887
4888 if (ret < 0)
4889 req_set_fail_links(req);
4890 io_cqring_add_event(req, ret);
4891 io_put_req(req);
5262f567
JA
4892 return 0;
4893}
4894
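/*
 * Per-opcode prep for requests that get deferred or punted to async
 * context: grab the file table and work environment where needed, then
 * call the matching *_prep() helper so the sqe contents remain valid
 * after submission returns.
 */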
3529d8c2
JA
4895static int io_req_defer_prep(struct io_kiocb *req,
4896 const struct io_uring_sqe *sqe)
f67676d1 4897{
e781573e 4898 ssize_t ret = 0;
f67676d1 4899
f1d96a8f
PB
4900 if (!sqe)
4901 return 0;
4902
7cdaf587
XW
4903 io_req_init_async(req);
4904
f86cd20c
JA
4905 if (io_op_defs[req->opcode].file_table) {
4906 ret = io_grab_files(req);
4907 if (unlikely(ret))
4908 return ret;
4909 }
4910
cccf0ee8
JA
4911 io_req_work_grab_env(req, &io_op_defs[req->opcode]);
4912
d625c6ee 4913 switch (req->opcode) {
e781573e
JA
4914 case IORING_OP_NOP:
4915 break;
f67676d1
JA
4916 case IORING_OP_READV:
4917 case IORING_OP_READ_FIXED:
3a6820f2 4918 case IORING_OP_READ:
3529d8c2 4919 ret = io_read_prep(req, sqe, true);
f67676d1
JA
4920 break;
4921 case IORING_OP_WRITEV:
4922 case IORING_OP_WRITE_FIXED:
3a6820f2 4923 case IORING_OP_WRITE:
3529d8c2 4924 ret = io_write_prep(req, sqe, true);
f67676d1 4925 break;
0969e783 4926 case IORING_OP_POLL_ADD:
3529d8c2 4927 ret = io_poll_add_prep(req, sqe);
0969e783
JA
4928 break;
4929 case IORING_OP_POLL_REMOVE:
3529d8c2 4930 ret = io_poll_remove_prep(req, sqe);
0969e783 4931 break;
8ed8d3c3 4932 case IORING_OP_FSYNC:
3529d8c2 4933 ret = io_prep_fsync(req, sqe);
8ed8d3c3
JA
4934 break;
4935 case IORING_OP_SYNC_FILE_RANGE:
3529d8c2 4936 ret = io_prep_sfr(req, sqe);
8ed8d3c3 4937 break;
03b1230c 4938 case IORING_OP_SENDMSG:
fddaface 4939 case IORING_OP_SEND:
3529d8c2 4940 ret = io_sendmsg_prep(req, sqe);
03b1230c
JA
4941 break;
4942 case IORING_OP_RECVMSG:
fddaface 4943 case IORING_OP_RECV:
3529d8c2 4944 ret = io_recvmsg_prep(req, sqe);
03b1230c 4945 break;
f499a021 4946 case IORING_OP_CONNECT:
3529d8c2 4947 ret = io_connect_prep(req, sqe);
f499a021 4948 break;
2d28390a 4949 case IORING_OP_TIMEOUT:
3529d8c2 4950 ret = io_timeout_prep(req, sqe, false);
b7bb4f7d 4951 break;
b29472ee 4952 case IORING_OP_TIMEOUT_REMOVE:
3529d8c2 4953 ret = io_timeout_remove_prep(req, sqe);
b29472ee 4954 break;
fbf23849 4955 case IORING_OP_ASYNC_CANCEL:
3529d8c2 4956 ret = io_async_cancel_prep(req, sqe);
fbf23849 4957 break;
2d28390a 4958 case IORING_OP_LINK_TIMEOUT:
3529d8c2 4959 ret = io_timeout_prep(req, sqe, true);
b7bb4f7d 4960 break;
8ed8d3c3 4961 case IORING_OP_ACCEPT:
3529d8c2 4962 ret = io_accept_prep(req, sqe);
8ed8d3c3 4963 break;
d63d1b5e
JA
4964 case IORING_OP_FALLOCATE:
4965 ret = io_fallocate_prep(req, sqe);
4966 break;
15b71abe
JA
4967 case IORING_OP_OPENAT:
4968 ret = io_openat_prep(req, sqe);
4969 break;
b5dba59e
JA
4970 case IORING_OP_CLOSE:
4971 ret = io_close_prep(req, sqe);
4972 break;
05f3fb3c
JA
4973 case IORING_OP_FILES_UPDATE:
4974 ret = io_files_update_prep(req, sqe);
4975 break;
eddc7ef5
JA
4976 case IORING_OP_STATX:
4977 ret = io_statx_prep(req, sqe);
4978 break;
4840e418
JA
4979 case IORING_OP_FADVISE:
4980 ret = io_fadvise_prep(req, sqe);
4981 break;
c1ca757b
JA
4982 case IORING_OP_MADVISE:
4983 ret = io_madvise_prep(req, sqe);
4984 break;
cebdb986
JA
4985 case IORING_OP_OPENAT2:
4986 ret = io_openat2_prep(req, sqe);
4987 break;
3e4827b0
JA
4988 case IORING_OP_EPOLL_CTL:
4989 ret = io_epoll_ctl_prep(req, sqe);
4990 break;
7d67af2c
PB
4991 case IORING_OP_SPLICE:
4992 ret = io_splice_prep(req, sqe);
4993 break;
ddf0322d
JA
4994 case IORING_OP_PROVIDE_BUFFERS:
4995 ret = io_provide_buffers_prep(req, sqe);
4996 break;
067524e9
JA
4997 case IORING_OP_REMOVE_BUFFERS:
4998 ret = io_remove_buffers_prep(req, sqe);
4999 break;
f2a8d5c7
PB
5000 case IORING_OP_TEE:
5001 ret = io_tee_prep(req, sqe);
5002 break;
f67676d1 5003 default:
e781573e
JA
5004 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5005 req->opcode);
5006 ret = -EINVAL;
b7bb4f7d 5007 break;
f67676d1
JA
5008 }
5009
b7bb4f7d 5010 return ret;
f67676d1
JA
5011}
5012
3529d8c2 5013static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
de0617e4 5014{
a197f664 5015 struct io_ring_ctx *ctx = req->ctx;
f67676d1 5016 int ret;
de0617e4 5017
9d858b21 5018 /* Still need defer if there is pending req in defer list. */
4ee36314 5019 if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
de0617e4
JA
5020 return 0;
5021
650b5481
PB
5022 if (!req->io) {
5023 if (io_alloc_async_ctx(req))
5024 return -EAGAIN;
5025 ret = io_req_defer_prep(req, sqe);
5026 if (ret < 0)
5027 return ret;
5028 }
2d28390a 5029
de0617e4 5030 spin_lock_irq(&ctx->completion_lock);
9d858b21 5031 if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
de0617e4 5032 spin_unlock_irq(&ctx->completion_lock);
de0617e4
JA
5033 return 0;
5034 }
5035
915967f6 5036 trace_io_uring_defer(ctx, req, req->user_data);
de0617e4
JA
5037 list_add_tail(&req->list, &ctx->defer_list);
5038 spin_unlock_irq(&ctx->completion_lock);
5039 return -EIOCBQUEUED;
5040}
5041
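/*
 * Release per-opcode resources tracked by REQ_F_NEED_CLEANUP: selected
 * buffers, iovecs copied into the async context, and the extra file
 * reference held by splice/tee.
 */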
99bc4c38
PB
5042static void io_cleanup_req(struct io_kiocb *req)
5043{
5044 struct io_async_ctx *io = req->io;
5045
5046 switch (req->opcode) {
5047 case IORING_OP_READV:
5048 case IORING_OP_READ_FIXED:
5049 case IORING_OP_READ:
bcda7baa
JA
5050 if (req->flags & REQ_F_BUFFER_SELECTED)
5051 kfree((void *)(unsigned long)req->rw.addr);
5052 /* fallthrough */
99bc4c38
PB
5053 case IORING_OP_WRITEV:
5054 case IORING_OP_WRITE_FIXED:
5055 case IORING_OP_WRITE:
5056 if (io->rw.iov != io->rw.fast_iov)
5057 kfree(io->rw.iov);
5058 break;
99bc4c38 5059 case IORING_OP_RECVMSG:
52de1fe1
JA
5060 if (req->flags & REQ_F_BUFFER_SELECTED)
5061 kfree(req->sr_msg.kbuf);
5062 /* fallthrough */
5063 case IORING_OP_SENDMSG:
99bc4c38
PB
5064 if (io->msg.iov != io->msg.fast_iov)
5065 kfree(io->msg.iov);
5066 break;
bcda7baa
JA
5067 case IORING_OP_RECV:
5068 if (req->flags & REQ_F_BUFFER_SELECTED)
5069 kfree(req->sr_msg.kbuf);
5070 break;
8fef80bf
PB
5071 case IORING_OP_OPENAT:
5072 case IORING_OP_OPENAT2:
8fef80bf 5073 break;
7d67af2c 5074 case IORING_OP_SPLICE:
f2a8d5c7 5075 case IORING_OP_TEE:
7d67af2c
PB
5076 io_put_file(req, req->splice.file_in,
5077 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
5078 break;
99bc4c38
PB
5079 }
5080
5081 req->flags &= ~REQ_F_NEED_CLEANUP;
5082}
5083
3529d8c2 5084static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
014db007 5085 bool force_nonblock)
2b188cc1 5086{
a197f664 5087 struct io_ring_ctx *ctx = req->ctx;
d625c6ee 5088 int ret;
2b188cc1 5089
d625c6ee 5090 switch (req->opcode) {
2b188cc1 5091 case IORING_OP_NOP:
78e19bbe 5092 ret = io_nop(req);
2b188cc1
JA
5093 break;
5094 case IORING_OP_READV:
edafccee 5095 case IORING_OP_READ_FIXED:
3a6820f2 5096 case IORING_OP_READ:
3529d8c2
JA
5097 if (sqe) {
5098 ret = io_read_prep(req, sqe, force_nonblock);
5099 if (ret < 0)
5100 break;
5101 }
014db007 5102 ret = io_read(req, force_nonblock);
edafccee 5103 break;
3529d8c2 5104 case IORING_OP_WRITEV:
edafccee 5105 case IORING_OP_WRITE_FIXED:
3a6820f2 5106 case IORING_OP_WRITE:
3529d8c2
JA
5107 if (sqe) {
5108 ret = io_write_prep(req, sqe, force_nonblock);
5109 if (ret < 0)
5110 break;
5111 }
014db007 5112 ret = io_write(req, force_nonblock);
2b188cc1 5113 break;
c992fe29 5114 case IORING_OP_FSYNC:
3529d8c2
JA
5115 if (sqe) {
5116 ret = io_prep_fsync(req, sqe);
5117 if (ret < 0)
5118 break;
5119 }
014db007 5120 ret = io_fsync(req, force_nonblock);
c992fe29 5121 break;
221c5eb2 5122 case IORING_OP_POLL_ADD:
3529d8c2
JA
5123 if (sqe) {
5124 ret = io_poll_add_prep(req, sqe);
5125 if (ret)
5126 break;
5127 }
014db007 5128 ret = io_poll_add(req);
221c5eb2
JA
5129 break;
5130 case IORING_OP_POLL_REMOVE:
3529d8c2
JA
5131 if (sqe) {
5132 ret = io_poll_remove_prep(req, sqe);
5133 if (ret < 0)
5134 break;
5135 }
fc4df999 5136 ret = io_poll_remove(req);
221c5eb2 5137 break;
5d17b4a4 5138 case IORING_OP_SYNC_FILE_RANGE:
3529d8c2
JA
5139 if (sqe) {
5140 ret = io_prep_sfr(req, sqe);
5141 if (ret < 0)
5142 break;
5143 }
014db007 5144 ret = io_sync_file_range(req, force_nonblock);
5d17b4a4 5145 break;
0fa03c62 5146 case IORING_OP_SENDMSG:
fddaface 5147 case IORING_OP_SEND:
3529d8c2
JA
5148 if (sqe) {
5149 ret = io_sendmsg_prep(req, sqe);
5150 if (ret < 0)
5151 break;
5152 }
fddaface 5153 if (req->opcode == IORING_OP_SENDMSG)
014db007 5154 ret = io_sendmsg(req, force_nonblock);
fddaface 5155 else
014db007 5156 ret = io_send(req, force_nonblock);
0fa03c62 5157 break;
aa1fa28f 5158 case IORING_OP_RECVMSG:
fddaface 5159 case IORING_OP_RECV:
3529d8c2
JA
5160 if (sqe) {
5161 ret = io_recvmsg_prep(req, sqe);
5162 if (ret)
5163 break;
5164 }
fddaface 5165 if (req->opcode == IORING_OP_RECVMSG)
014db007 5166 ret = io_recvmsg(req, force_nonblock);
fddaface 5167 else
014db007 5168 ret = io_recv(req, force_nonblock);
aa1fa28f 5169 break;
5262f567 5170 case IORING_OP_TIMEOUT:
3529d8c2
JA
5171 if (sqe) {
5172 ret = io_timeout_prep(req, sqe, false);
5173 if (ret)
5174 break;
5175 }
fc4df999 5176 ret = io_timeout(req);
5262f567 5177 break;
11365043 5178 case IORING_OP_TIMEOUT_REMOVE:
3529d8c2
JA
5179 if (sqe) {
5180 ret = io_timeout_remove_prep(req, sqe);
5181 if (ret)
5182 break;
5183 }
fc4df999 5184 ret = io_timeout_remove(req);
11365043 5185 break;
17f2fe35 5186 case IORING_OP_ACCEPT:
3529d8c2
JA
5187 if (sqe) {
5188 ret = io_accept_prep(req, sqe);
5189 if (ret)
5190 break;
5191 }
014db007 5192 ret = io_accept(req, force_nonblock);
17f2fe35 5193 break;
f8e85cf2 5194 case IORING_OP_CONNECT:
3529d8c2
JA
5195 if (sqe) {
5196 ret = io_connect_prep(req, sqe);
5197 if (ret)
5198 break;
5199 }
014db007 5200 ret = io_connect(req, force_nonblock);
f8e85cf2 5201 break;
62755e35 5202 case IORING_OP_ASYNC_CANCEL:
3529d8c2
JA
5203 if (sqe) {
5204 ret = io_async_cancel_prep(req, sqe);
5205 if (ret)
5206 break;
5207 }
014db007 5208 ret = io_async_cancel(req);
62755e35 5209 break;
d63d1b5e
JA
5210 case IORING_OP_FALLOCATE:
5211 if (sqe) {
5212 ret = io_fallocate_prep(req, sqe);
5213 if (ret)
5214 break;
5215 }
014db007 5216 ret = io_fallocate(req, force_nonblock);
d63d1b5e 5217 break;
15b71abe
JA
5218 case IORING_OP_OPENAT:
5219 if (sqe) {
5220 ret = io_openat_prep(req, sqe);
5221 if (ret)
5222 break;
5223 }
014db007 5224 ret = io_openat(req, force_nonblock);
15b71abe 5225 break;
b5dba59e
JA
5226 case IORING_OP_CLOSE:
5227 if (sqe) {
5228 ret = io_close_prep(req, sqe);
5229 if (ret)
5230 break;
5231 }
014db007 5232 ret = io_close(req, force_nonblock);
b5dba59e 5233 break;
05f3fb3c
JA
5234 case IORING_OP_FILES_UPDATE:
5235 if (sqe) {
5236 ret = io_files_update_prep(req, sqe);
5237 if (ret)
5238 break;
5239 }
5240 ret = io_files_update(req, force_nonblock);
5241 break;
eddc7ef5
JA
5242 case IORING_OP_STATX:
5243 if (sqe) {
5244 ret = io_statx_prep(req, sqe);
5245 if (ret)
5246 break;
5247 }
014db007 5248 ret = io_statx(req, force_nonblock);
eddc7ef5 5249 break;
4840e418
JA
5250 case IORING_OP_FADVISE:
5251 if (sqe) {
5252 ret = io_fadvise_prep(req, sqe);
5253 if (ret)
5254 break;
5255 }
014db007 5256 ret = io_fadvise(req, force_nonblock);
4840e418 5257 break;
c1ca757b
JA
5258 case IORING_OP_MADVISE:
5259 if (sqe) {
5260 ret = io_madvise_prep(req, sqe);
5261 if (ret)
5262 break;
5263 }
014db007 5264 ret = io_madvise(req, force_nonblock);
c1ca757b 5265 break;
cebdb986
JA
5266 case IORING_OP_OPENAT2:
5267 if (sqe) {
5268 ret = io_openat2_prep(req, sqe);
5269 if (ret)
5270 break;
5271 }
014db007 5272 ret = io_openat2(req, force_nonblock);
cebdb986 5273 break;
3e4827b0
JA
5274 case IORING_OP_EPOLL_CTL:
5275 if (sqe) {
5276 ret = io_epoll_ctl_prep(req, sqe);
5277 if (ret)
5278 break;
5279 }
014db007 5280 ret = io_epoll_ctl(req, force_nonblock);
3e4827b0 5281 break;
7d67af2c
PB
5282 case IORING_OP_SPLICE:
5283 if (sqe) {
5284 ret = io_splice_prep(req, sqe);
5285 if (ret < 0)
5286 break;
5287 }
014db007 5288 ret = io_splice(req, force_nonblock);
7d67af2c 5289 break;
ddf0322d
JA
5290 case IORING_OP_PROVIDE_BUFFERS:
5291 if (sqe) {
5292 ret = io_provide_buffers_prep(req, sqe);
5293 if (ret)
5294 break;
5295 }
5296 ret = io_provide_buffers(req, force_nonblock);
5297 break;
067524e9
JA
5298 case IORING_OP_REMOVE_BUFFERS:
5299 if (sqe) {
5300 ret = io_remove_buffers_prep(req, sqe);
5301 if (ret)
5302 break;
5303 }
5304 ret = io_remove_buffers(req, force_nonblock);
3e4827b0 5305 break;
f2a8d5c7
PB
5306 case IORING_OP_TEE:
5307 if (sqe) {
5308 ret = io_tee_prep(req, sqe);
5309 if (ret < 0)
5310 break;
5311 }
5312 ret = io_tee(req, force_nonblock);
5313 break;
2b188cc1
JA
5314 default:
5315 ret = -EINVAL;
5316 break;
5317 }
5318
def596e9
JA
5319 if (ret)
5320 return ret;
5321
b532576e
JA
5322 /* If the op doesn't have a file, we're not polling for it */
5323 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
11ba820b
JA
5324 const bool in_async = io_wq_current_is_worker();
5325
9e645e11 5326 if (req->result == -EAGAIN)
def596e9
JA
5327 return -EAGAIN;
5328
11ba820b
JA
5329 /* workqueue context doesn't hold uring_lock, grab it now */
5330 if (in_async)
5331 mutex_lock(&ctx->uring_lock);
5332
def596e9 5333 io_iopoll_req_issued(req);
11ba820b
JA
5334
5335 if (in_async)
5336 mutex_unlock(&ctx->uring_lock);
def596e9
JA
5337 }
5338
5339 return 0;
2b188cc1
JA
5340}
5341
d4c81f38
PB
5342static void io_arm_async_linked_timeout(struct io_kiocb *req)
5343{
5344 struct io_kiocb *link;
5345
5346 /* link head's timeout is queued in io_queue_async_work() */
5347 if (!(req->flags & REQ_F_QUEUE_TIMEOUT))
5348 return;
5349
5350 link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
5351 io_queue_linked_timeout(link);
5352}
5353
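/*
 * io-wq worker entry point: issue the request without force_nonblock,
 * retrying on -EAGAIN for polled IO since a worker cannot wait for
 * request slots, and fail the request if the work was canceled.
 */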
561fb04a 5354static void io_wq_submit_work(struct io_wq_work **workptr)
2b188cc1 5355{
561fb04a 5356 struct io_wq_work *work = *workptr;
2b188cc1 5357 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
561fb04a 5358 int ret = 0;
2b188cc1 5359
d4c81f38
PB
5360 io_arm_async_linked_timeout(req);
5361
0c9d5ccd
JA
5362 /* if NO_CANCEL is set, we must still run the work */
5363 if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
5364 IO_WQ_WORK_CANCEL) {
561fb04a 5365 ret = -ECANCELED;
0c9d5ccd 5366 }
31b51510 5367
561fb04a 5368 if (!ret) {
561fb04a 5369 do {
014db007 5370 ret = io_issue_sqe(req, NULL, false);
561fb04a
JA
5371 /*
5372 * We can get EAGAIN for polled IO even though we're
5373 * forcing a sync submission from here, since we can't
5374 * wait for request slots on the block side.
5375 */
5376 if (ret != -EAGAIN)
5377 break;
5378 cond_resched();
5379 } while (1);
5380 }
31b51510 5381
561fb04a 5382 if (ret) {
4e88d6e7 5383 req_set_fail_links(req);
78e19bbe 5384 io_cqring_add_event(req, ret);
817869d2 5385 io_put_req(req);
edafccee 5386 }
2b188cc1 5387
e9fd9396 5388 io_steal_work(req, workptr);
2b188cc1
JA
5389}
5390
65e19f54
JA
5391static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
5392 int index)
5393{
5394 struct fixed_file_table *table;
5395
05f3fb3c 5396 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
84695089 5397 return table->files[index & IORING_FILE_TABLE_MASK];
65e19f54
JA
5398}
5399
8da11c19
PB
5400static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
5401 int fd, struct file **out_file, bool fixed)
09bb8394 5402{
a197f664 5403 struct io_ring_ctx *ctx = req->ctx;
8da11c19 5404 struct file *file;
09bb8394 5405
8da11c19 5406 if (fixed) {
05f3fb3c 5407 if (unlikely(!ctx->file_data ||
09bb8394
JA
5408 (unsigned) fd >= ctx->nr_user_files))
5409 return -EBADF;
b7620121 5410 fd = array_index_nospec(fd, ctx->nr_user_files);
8da11c19 5411 file = io_file_from_index(ctx, fd);
fd2206e4
JA
5412 if (file) {
5413 req->fixed_file_refs = ctx->file_data->cur_refs;
5414 percpu_ref_get(req->fixed_file_refs);
5415 }
09bb8394 5416 } else {
c826bd7a 5417 trace_io_uring_file_get(ctx, fd);
8da11c19 5418 file = __io_file_get(state, fd);
09bb8394
JA
5419 }
5420
fd2206e4
JA
5421 if (file || io_op_defs[req->opcode].needs_file_no_error) {
5422 *out_file = file;
5423 return 0;
5424 }
5425 return -EBADF;
09bb8394
JA
5426}
5427
8da11c19 5428static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
63ff8223 5429 int fd)
8da11c19 5430{
8da11c19
PB
5431 bool fixed;
5432
63ff8223 5433 fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
0cdaf760 5434 if (unlikely(!fixed && io_async_submit(req->ctx)))
8da11c19
PB
5435 return -EBADF;
5436
5437 return io_file_get(state, req, fd, &req->file, fixed);
5438}
5439
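/*
 * Record current->files for async execution, but only while the ring fd
 * still resolves to our ring file: the ->flush() handler depends on the
 * inflight list to cancel work touching these files once the fd closes.
 */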
a197f664 5440static int io_grab_files(struct io_kiocb *req)
fcb323cc
JA
5441{
5442 int ret = -EBADF;
a197f664 5443 struct io_ring_ctx *ctx = req->ctx;
fcb323cc 5444
5b0bbee4 5445 if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
f86cd20c 5446 return 0;
b14cca0c 5447 if (!ctx->ring_file)
b5dba59e
JA
5448 return -EBADF;
5449
fcb323cc
JA
5450 rcu_read_lock();
5451 spin_lock_irq(&ctx->inflight_lock);
5452 /*
5453 * We use the f_ops->flush() handler to ensure that we can flush
5454 * out work accessing these files if the fd is closed. Check if
5455 * the fd has changed since we started down this path, and disallow
5456 * this operation if it has.
5457 */
b14cca0c 5458 if (fcheck(ctx->ring_fd) == ctx->ring_file) {
fcb323cc
JA
5459 list_add(&req->inflight_entry, &ctx->inflight_list);
5460 req->flags |= REQ_F_INFLIGHT;
5461 req->work.files = current->files;
5462 ret = 0;
5463 }
5464 spin_unlock_irq(&ctx->inflight_lock);
5465 rcu_read_unlock();
5466
5467 return ret;
5468}
5469
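/*
 * hrtimer callback for a linked timeout: if the request it guards is
 * still pending, unlink and cancel it with -ETIME; otherwise complete
 * the timeout itself with -ETIME.
 */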
2665abfd 5470static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2b188cc1 5471{
ad8a48ac
JA
5472 struct io_timeout_data *data = container_of(timer,
5473 struct io_timeout_data, timer);
5474 struct io_kiocb *req = data->req;
2665abfd
JA
5475 struct io_ring_ctx *ctx = req->ctx;
5476 struct io_kiocb *prev = NULL;
5477 unsigned long flags;
2665abfd
JA
5478
5479 spin_lock_irqsave(&ctx->completion_lock, flags);
5480
5481 /*
 5482 * We don't expect the list to be empty; that will only happen if we
5483 * race with the completion of the linked work.
5484 */
4493233e
PB
5485 if (!list_empty(&req->link_list)) {
5486 prev = list_entry(req->link_list.prev, struct io_kiocb,
5487 link_list);
5d960724 5488 if (refcount_inc_not_zero(&prev->refs)) {
4493233e 5489 list_del_init(&req->link_list);
5d960724
JA
5490 prev->flags &= ~REQ_F_LINK_TIMEOUT;
5491 } else
76a46e06 5492 prev = NULL;
2665abfd
JA
5493 }
5494
5495 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5496
5497 if (prev) {
4e88d6e7 5498 req_set_fail_links(prev);
014db007 5499 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
76a46e06 5500 io_put_req(prev);
47f46768
JA
5501 } else {
5502 io_cqring_add_event(req, -ETIME);
5503 io_put_req(req);
2665abfd 5504 }
2665abfd
JA
5505 return HRTIMER_NORESTART;
5506}
5507
ad8a48ac 5508static void io_queue_linked_timeout(struct io_kiocb *req)
2665abfd 5509{
76a46e06 5510 struct io_ring_ctx *ctx = req->ctx;
2665abfd 5511
76a46e06
JA
5512 /*
5513 * If the list is now empty, then our linked request finished before
 5514 * we got a chance to set up the timer.
5515 */
5516 spin_lock_irq(&ctx->completion_lock);
4493233e 5517 if (!list_empty(&req->link_list)) {
2d28390a 5518 struct io_timeout_data *data = &req->io->timeout;
94ae5e77 5519
ad8a48ac
JA
5520 data->timer.function = io_link_timeout_fn;
5521 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
5522 data->mode);
2665abfd 5523 }
76a46e06 5524 spin_unlock_irq(&ctx->completion_lock);
2665abfd 5525
2665abfd 5526 /* drop submission reference */
76a46e06
JA
5527 io_put_req(req);
5528}
2665abfd 5529
ad8a48ac 5530static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
2665abfd
JA
5531{
5532 struct io_kiocb *nxt;
5533
dea3b49c 5534 if (!(req->flags & REQ_F_LINK_HEAD))
2665abfd 5535 return NULL;
d7718a9d
JA
5536 /* for polled retry, if flag is set, we already went through here */
5537 if (req->flags & REQ_F_POLLED)
5538 return NULL;
2665abfd 5539
4493233e
PB
5540 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
5541 link_list);
d625c6ee 5542 if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
76a46e06 5543 return NULL;
2665abfd 5544
76a46e06 5545 req->flags |= REQ_F_LINK_TIMEOUT;
76a46e06 5546 return nxt;
2665abfd
JA
5547}
5548
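/*
 * The userspace pattern that creates this pairing: an sqe carrying
 * IOSQE_IO_LINK, immediately followed by an IORING_OP_LINK_TIMEOUT
 * sqe bounding how long the linked request may run. A sketch (raw
 * UAPI; get_sqe() assumed as before):
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *
 *	sqe = get_sqe(ring);
 *	// ...prep a read/recv/etc. here...
 *	sqe->flags |= IOSQE_IO_LINK;
 *
 *	sqe = get_sqe(ring);
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_LINK_TIMEOUT;
 *	sqe->fd     = -1;
 *	sqe->addr   = (unsigned long) &ts;
 *	sqe->len    = 1;                     // one timespec
 *
 * If the timer fires first, io_link_timeout_fn() above cancels the
 * linked request and the timeout's CQE reports -ETIME.
 */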
3529d8c2 5549static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2b188cc1 5550{
4a0a7a18 5551 struct io_kiocb *linked_timeout;
4bc4494e 5552 struct io_kiocb *nxt;
193155c8 5553 const struct cred *old_creds = NULL;
e0c5c576 5554 int ret;
2b188cc1 5555
4a0a7a18
JA
5556again:
5557 linked_timeout = io_prep_linked_timeout(req);
5558
7cdaf587
XW
5559 if ((req->flags & REQ_F_WORK_INITIALIZED) && req->work.creds &&
5560 req->work.creds != current_cred()) {
193155c8
JA
5561 if (old_creds)
5562 revert_creds(old_creds);
5563 if (old_creds == req->work.creds)
5564 old_creds = NULL; /* restored original creds */
5565 else
5566 old_creds = override_creds(req->work.creds);
5567 }
5568
014db007 5569 ret = io_issue_sqe(req, sqe, true);
491381ce
JA
5570
5571 /*
5572 * We async punt it if the file wasn't marked NOWAIT, or if the file
5573 * doesn't support non-blocking read/write attempts
5574 */
5575 if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
5576 (req->flags & REQ_F_MUST_PUNT))) {
d7718a9d
JA
5577 if (io_arm_poll_handler(req)) {
5578 if (linked_timeout)
5579 io_queue_linked_timeout(linked_timeout);
4bc4494e 5580 goto exit;
d7718a9d 5581 }
86a761f8 5582punt:
7cdaf587
XW
5583 io_req_init_async(req);
5584
f86cd20c 5585 if (io_op_defs[req->opcode].file_table) {
bbad27b2
PB
5586 ret = io_grab_files(req);
5587 if (ret)
5588 goto err;
2b188cc1 5589 }
bbad27b2
PB
5590
5591 /*
5592 * Queued up for async execution, worker will release
5593 * submit reference when the iocb is actually submitted.
5594 */
5595 io_queue_async_work(req);
4bc4494e 5596 goto exit;
2b188cc1 5597 }
e65ef56d 5598
fcb323cc 5599err:
4bc4494e 5600 nxt = NULL;
76a46e06 5601 /* drop submission reference */
2a44f467 5602 io_put_req_find_next(req, &nxt);
e65ef56d 5603
f9bd67f6 5604 if (linked_timeout) {
76a46e06 5605 if (!ret)
f9bd67f6 5606 io_queue_linked_timeout(linked_timeout);
76a46e06 5607 else
f9bd67f6 5608 io_put_req(linked_timeout);
76a46e06
JA
5609 }
5610
e65ef56d 5611 /* and drop final reference, if we failed */
9e645e11 5612 if (ret) {
78e19bbe 5613 io_cqring_add_event(req, ret);
4e88d6e7 5614 req_set_fail_links(req);
e65ef56d 5615 io_put_req(req);
9e645e11 5616 }
4a0a7a18
JA
5617 if (nxt) {
5618 req = nxt;
86a761f8
PB
5619
5620 if (req->flags & REQ_F_FORCE_ASYNC)
5621 goto punt;
4a0a7a18
JA
5622 goto again;
5623 }
4bc4494e 5624exit:
193155c8
JA
5625 if (old_creds)
5626 revert_creds(old_creds);
2b188cc1
JA
5627}
5628
3529d8c2 5629static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4fe2c963
JL
5630{
5631 int ret;
5632
3529d8c2 5633 ret = io_req_defer(req, sqe);
4fe2c963
JL
5634 if (ret) {
5635 if (ret != -EIOCBQUEUED) {
1118591a 5636fail_req:
78e19bbe 5637 io_cqring_add_event(req, ret);
4e88d6e7 5638 req_set_fail_links(req);
78e19bbe 5639 io_double_put_req(req);
4fe2c963 5640 }
2550878f 5641 } else if (req->flags & REQ_F_FORCE_ASYNC) {
bd2ab18a
PB
5642 if (!req->io) {
5643 ret = -EAGAIN;
5644 if (io_alloc_async_ctx(req))
5645 goto fail_req;
5646 ret = io_req_defer_prep(req, sqe);
5647 if (unlikely(ret < 0))
5648 goto fail_req;
5649 }
5650
ce35a47a
JA
5651 /*
 5652 * Never try inline submit if IOSQE_ASYNC is set, go straight
5653 * to async execution.
5654 */
5655 req->work.flags |= IO_WQ_WORK_CONCURRENT;
5656 io_queue_async_work(req);
5657 } else {
3529d8c2 5658 __io_queue_sqe(req, sqe);
ce35a47a 5659 }
4fe2c963
JL
5660}
5661
1b4a51b6 5662static inline void io_queue_link_head(struct io_kiocb *req)
4fe2c963 5663{
94ae5e77 5664 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
1b4a51b6
PB
5665 io_cqring_add_event(req, -ECANCELED);
5666 io_double_put_req(req);
5667 } else
3529d8c2 5668 io_queue_sqe(req, NULL);
4fe2c963
JL
5669}
5670
1d4240cc 5671static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
7d01bd74 5672 struct io_kiocb **link)
9e645e11 5673{
a197f664 5674 struct io_ring_ctx *ctx = req->ctx;
ef4ff581 5675 int ret;
9e645e11 5676
9e645e11
JA
5677 /*
5678 * If we already have a head request, queue this one for async
5679 * submittal once the head completes. If we don't have a head but
5680 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
5681 * submitted sync once the chain is complete. If none of those
5682 * conditions are true (normal request), then just queue it.
5683 */
5684 if (*link) {
9d76377f 5685 struct io_kiocb *head = *link;
4e88d6e7 5686
8cdf2193
PB
5687 /*
 5688 * Given the sequential execution of a link, draining both sides
 5689 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
 5690 * requests in the link. So it drains the head and the request
 5691 * after the link. The latter is done via the drain_next flag,
 5692 * to persist the effect across calls.
5693 */
ef4ff581 5694 if (req->flags & REQ_F_IO_DRAIN) {
711be031
PB
5695 head->flags |= REQ_F_IO_DRAIN;
5696 ctx->drain_next = 1;
5697 }
1d4240cc
PB
5698 if (io_alloc_async_ctx(req))
5699 return -EAGAIN;
9e645e11 5700
3529d8c2 5701 ret = io_req_defer_prep(req, sqe);
2d28390a 5702 if (ret) {
4e88d6e7 5703 /* fail even hard links since we don't submit */
9d76377f 5704 head->flags |= REQ_F_FAIL_LINK;
1d4240cc 5705 return ret;
2d28390a 5706 }
9d76377f
PB
5707 trace_io_uring_link(ctx, req, head);
5708 list_add_tail(&req->link_list, &head->link_list);
32fe525b
PB
5709
5710 /* last request of a link, enqueue the link */
ef4ff581 5711 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
32fe525b
PB
5712 io_queue_link_head(head);
5713 *link = NULL;
5714 }
9e645e11 5715 } else {
711be031
PB
5716 if (unlikely(ctx->drain_next)) {
5717 req->flags |= REQ_F_IO_DRAIN;
ef4ff581 5718 ctx->drain_next = 0;
711be031 5719 }
ef4ff581 5720 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
dea3b49c 5721 req->flags |= REQ_F_LINK_HEAD;
711be031 5722 INIT_LIST_HEAD(&req->link_list);
f1d96a8f 5723
1d4240cc
PB
5724 if (io_alloc_async_ctx(req))
5725 return -EAGAIN;
5726
711be031
PB
5727 ret = io_req_defer_prep(req, sqe);
5728 if (ret)
5729 req->flags |= REQ_F_FAIL_LINK;
5730 *link = req;
5731 } else {
5732 io_queue_sqe(req, sqe);
5733 }
9e645e11 5734 }
2e6e1fde 5735
1d4240cc 5736 return 0;
9e645e11
JA
5737}
5738
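/*
 * From userspace, the link bookkeeping above is driven purely by
 * flags: every sqe with IOSQE_IO_LINK extends the chain, and the
 * first sqe without it terminates the chain. A sketch ordering a
 * write strictly before an fsync:
 *
 *	sqe = get_sqe(ring);
 *	// ...prep IORING_OP_WRITE...
 *	sqe->flags |= IOSQE_IO_LINK;
 *
 *	sqe = get_sqe(ring);
 *	// ...prep IORING_OP_FSYNC...        no flag: ends the chain
 *
 * If the write fails, the fsync completes with -ECANCELED, unless
 * IOSQE_IO_HARDLINK was used on the write.
 */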
9a56a232
JA
5739/*
5740 * Batched submission is done, ensure local IO is flushed out.
5741 */
5742static void io_submit_state_end(struct io_submit_state *state)
5743{
5744 blk_finish_plug(&state->plug);
9f13c35b 5745 io_state_file_put(state);
2579f913 5746 if (state->free_reqs)
6c8a3134 5747 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
9a56a232
JA
5748}
5749
5750/*
5751 * Start submission side cache.
5752 */
5753static void io_submit_state_start(struct io_submit_state *state,
22efde59 5754 unsigned int max_ios)
9a56a232
JA
5755{
5756 blk_start_plug(&state->plug);
2579f913 5757 state->free_reqs = 0;
9a56a232
JA
5758 state->file = NULL;
5759 state->ios_left = max_ios;
5760}
5761
2b188cc1
JA
5762static void io_commit_sqring(struct io_ring_ctx *ctx)
5763{
75b28aff 5764 struct io_rings *rings = ctx->rings;
2b188cc1 5765
caf582c6
PB
5766 /*
5767 * Ensure any loads from the SQEs are done at this point,
5768 * since once we write the new head, the application could
5769 * write new data to them.
5770 */
5771 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
5772}
5773
2b188cc1 5774/*
3529d8c2 5775 * Fetch an sqe, if one is available. Note that the returned sqe will point to memory
2b188cc1
JA
5776 * that is mapped by userspace. This means that care needs to be taken to
5777 * ensure that reads are stable, as we cannot rely on userspace always
5778 * being a good citizen. If members of the sqe are validated and then later
5779 * used, it's important that those reads are done through READ_ONCE() to
5780 * prevent a re-load down the line.
5781 */
709b302f 5782static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2b188cc1 5783{
75b28aff 5784 u32 *sq_array = ctx->sq_array;
2b188cc1
JA
5785 unsigned head;
5786
5787 /*
5788 * The cached sq head (or cq tail) serves two purposes:
5789 *
5790 * 1) allows us to batch the cost of updating the user visible
5791 * head updates.
5792 * 2) allows the kernel side to track the head on its own, even
5793 * though the application is the one updating it.
5794 */
ee7d46d9 5795 head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
709b302f
PB
5796 if (likely(head < ctx->sq_entries))
5797 return &ctx->sq_sqes[head];
2b188cc1
JA
5798
5799 /* drop invalid entries */
498ccd9e 5800 ctx->cached_sq_dropped++;
ee7d46d9 5801 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
709b302f
PB
5802 return NULL;
5803}
5804
5805static inline void io_consume_sqe(struct io_ring_ctx *ctx)
5806{
5807 ctx->cached_sq_head++;
2b188cc1
JA
5808}
5809
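/*
 * The userspace half of this handshake, sketched with the mmap'ed
 * ring fields (sq_tail, sq_mask, sq_array, sqes; barrier spelled
 * kernel-style here, a C11 release store in real userspace code):
 *
 *	unsigned tail  = *sq_tail;           // app is the only writer
 *	unsigned index = tail & *sq_mask;
 *
 *	fill_sqe(&sqes[index]);              // assumed helper
 *	sq_array[index] = index;             // 1:1 slot mapping
 *	smp_store_release(sq_tail, tail + 1);
 *
 * The release store on the tail pairs with the acquire load the
 * kernel does in io_sqring_entries(), so the sqe contents are
 * visible before the new tail is.
 */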
ef4ff581
PB
5810#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
5811 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
5812 IOSQE_BUFFER_SELECT)
5813
5814static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
5815 const struct io_uring_sqe *sqe,
0cdaf760 5816 struct io_submit_state *state)
0553b8bd 5817{
ef4ff581 5818 unsigned int sqe_flags;
63ff8223 5819 int id;
ef4ff581 5820
0553b8bd
PB
5821 /*
 5822 * All io requests need to record the previous position; for LINK vs
 5823 * DRAIN, it can be used to mark the position of the first IO in the
 5824 * link list.
5825 */
31af27c7 5826 req->sequence = ctx->cached_sq_head - ctx->cached_sq_dropped;
0553b8bd
PB
5827 req->opcode = READ_ONCE(sqe->opcode);
5828 req->user_data = READ_ONCE(sqe->user_data);
5829 req->io = NULL;
5830 req->file = NULL;
5831 req->ctx = ctx;
5832 req->flags = 0;
5833 /* one is dropped after submission, the other at completion */
5834 refcount_set(&req->refs, 2);
4dd2824d 5835 req->task = current;
0553b8bd 5836 req->result = 0;
ef4ff581
PB
5837
5838 if (unlikely(req->opcode >= IORING_OP_LAST))
5839 return -EINVAL;
5840
5841 if (io_op_defs[req->opcode].needs_mm && !current->mm) {
5842 if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
5843 return -EFAULT;
f5678e7f 5844 kthread_use_mm(ctx->sqo_mm);
ef4ff581
PB
5845 }
5846
5847 sqe_flags = READ_ONCE(sqe->flags);
5848 /* enforce forwards compatibility on users */
5849 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
5850 return -EINVAL;
5851
5852 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
5853 !io_op_defs[req->opcode].buffer_select)
5854 return -EOPNOTSUPP;
5855
5856 id = READ_ONCE(sqe->personality);
5857 if (id) {
7cdaf587 5858 io_req_init_async(req);
ef4ff581
PB
5859 req->work.creds = idr_find(&ctx->personality_idr, id);
5860 if (unlikely(!req->work.creds))
5861 return -EINVAL;
5862 get_cred(req->work.creds);
5863 }
5864
5865 /* same numerical values with corresponding REQ_F_*, safe to copy */
c11368a5 5866 req->flags |= sqe_flags;
ef4ff581 5867
63ff8223
JA
5868 if (!io_op_defs[req->opcode].needs_file)
5869 return 0;
5870
5871 return io_req_set_file(state, req, READ_ONCE(sqe->fd));
0553b8bd
PB
5872}
5873
fb5ccc98 5874static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
0cdaf760 5875 struct file *ring_file, int ring_fd)
6c271ce2
JA
5876{
5877 struct io_submit_state state, *statep = NULL;
9e645e11 5878 struct io_kiocb *link = NULL;
9e645e11 5879 int i, submitted = 0;
6c271ce2 5880
c4a2ed72 5881 /* if we have a backlog and couldn't flush it all, return BUSY */
ad3eb2c8
JA
5882 if (test_bit(0, &ctx->sq_check_overflow)) {
5883 if (!list_empty(&ctx->cq_overflow_list) &&
5884 !io_cqring_overflow_flush(ctx, false))
5885 return -EBUSY;
5886 }
6c271ce2 5887
ee7d46d9
PB
5888 /* make sure SQ entry isn't read before tail */
5889 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
9ef4f124 5890
2b85edfc
PB
5891 if (!percpu_ref_tryget_many(&ctx->refs, nr))
5892 return -EAGAIN;
6c271ce2
JA
5893
5894 if (nr > IO_PLUG_THRESHOLD) {
22efde59 5895 io_submit_state_start(&state, nr);
6c271ce2
JA
5896 statep = &state;
5897 }
5898
b14cca0c
PB
5899 ctx->ring_fd = ring_fd;
5900 ctx->ring_file = ring_file;
5901
6c271ce2 5902 for (i = 0; i < nr; i++) {
3529d8c2 5903 const struct io_uring_sqe *sqe;
196be95c 5904 struct io_kiocb *req;
1cb1edb2 5905 int err;
fb5ccc98 5906
b1e50e54
PB
5907 sqe = io_get_sqe(ctx);
5908 if (unlikely(!sqe)) {
5909 io_consume_sqe(ctx);
5910 break;
5911 }
0553b8bd 5912 req = io_alloc_req(ctx, statep);
196be95c
PB
5913 if (unlikely(!req)) {
5914 if (!submitted)
5915 submitted = -EAGAIN;
fb5ccc98 5916 break;
196be95c 5917 }
fb5ccc98 5918
0cdaf760 5919 err = io_init_req(ctx, req, sqe, statep);
709b302f 5920 io_consume_sqe(ctx);
d3656344
JA
5921 /* will complete beyond this point, count as submitted */
5922 submitted++;
5923
ef4ff581 5924 if (unlikely(err)) {
1cb1edb2
PB
5925fail_req:
5926 io_cqring_add_event(req, err);
d3656344 5927 io_double_put_req(req);
196be95c
PB
5928 break;
5929 }
fb5ccc98 5930
354420f7 5931 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
0cdaf760 5932 true, io_async_submit(ctx));
7d01bd74 5933 err = io_submit_sqe(req, sqe, &link);
1d4240cc
PB
5934 if (err)
5935 goto fail_req;
6c271ce2
JA
5936 }
5937
9466f437
PB
5938 if (unlikely(submitted != nr)) {
5939 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
5940
5941 percpu_ref_put_many(&ctx->refs, nr - ref_used);
5942 }
9e645e11 5943 if (link)
1b4a51b6 5944 io_queue_link_head(link);
6c271ce2
JA
5945 if (statep)
5946 io_submit_state_end(&state);
5947
ae9428ca
PB
5948 /* Commit SQ ring head once we've consumed and submitted all SQEs */
5949 io_commit_sqring(ctx);
5950
6c271ce2
JA
5951 return submitted;
5952}
5953
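/*
 * io_submit_sqes() is reached from the io_uring_enter(2) syscall (or
 * from the SQPOLL thread below). A minimal submit-and-wait sketch,
 * assuming ring_fd came from io_uring_setup(2):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd,
 *			  to_submit,         // sqes published as above
 *			  1,                 // min_complete
 *			  IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * The result mirrors 'submitted' here: the number of sqes consumed,
 * or an error such as EBUSY when the CQ overflow backlog could not
 * be flushed.
 */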
bf9c2f1c
PB
5954static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
5955{
5956 struct mm_struct *mm = current->mm;
5957
5958 if (mm) {
f5678e7f 5959 kthread_unuse_mm(mm);
bf9c2f1c
PB
5960 mmput(mm);
5961 }
5962}
5963
6c271ce2
JA
5964static int io_sq_thread(void *data)
5965{
6c271ce2 5966 struct io_ring_ctx *ctx = data;
181e448d 5967 const struct cred *old_cred;
6c271ce2 5968 DEFINE_WAIT(wait);
6c271ce2 5969 unsigned long timeout;
bdcd3eab 5970 int ret = 0;
6c271ce2 5971
0f158b4c 5972 complete(&ctx->sq_thread_comp);
a4c0b3de 5973
181e448d 5974 old_cred = override_creds(ctx->creds);
6c271ce2 5975
bdcd3eab 5976 timeout = jiffies + ctx->sq_thread_idle;
2bbcd6d3 5977 while (!kthread_should_park()) {
fb5ccc98 5978 unsigned int to_submit;
6c271ce2 5979
bdcd3eab 5980 if (!list_empty(&ctx->poll_list)) {
6c271ce2
JA
5981 unsigned nr_events = 0;
5982
bdcd3eab
XW
5983 mutex_lock(&ctx->uring_lock);
5984 if (!list_empty(&ctx->poll_list))
5985 io_iopoll_getevents(ctx, &nr_events, 0);
5986 else
6c271ce2 5987 timeout = jiffies + ctx->sq_thread_idle;
bdcd3eab 5988 mutex_unlock(&ctx->uring_lock);
6c271ce2
JA
5989 }
5990
fb5ccc98 5991 to_submit = io_sqring_entries(ctx);
c1edbf5f
JA
5992
5993 /*
5994 * If submit got -EBUSY, flag us as needing the application
5995 * to enter the kernel to reap and flush events.
5996 */
5997 if (!to_submit || ret == -EBUSY) {
7143b5ac
SG
5998 /*
 5999 * Drop cur_mm before scheduling; we can't hold it for
6000 * long periods (or over schedule()). Do this before
6001 * adding ourselves to the waitqueue, as the unuse/drop
6002 * may sleep.
6003 */
bf9c2f1c 6004 io_sq_thread_drop_mm(ctx);
7143b5ac 6005
6c271ce2
JA
6006 /*
6007 * We're polling. If we're within the defined idle
6008 * period, then let us spin without work before going
c1edbf5f
JA
6009 * to sleep. The exception is if we got EBUSY doing
6010 * more IO, we should wait for the application to
6011 * reap events and wake us up.
6c271ce2 6012 */
bdcd3eab 6013 if (!list_empty(&ctx->poll_list) ||
df069d80
JA
6014 (!time_after(jiffies, timeout) && ret != -EBUSY &&
6015 !percpu_ref_is_dying(&ctx->refs))) {
b41e9852
JA
6016 if (current->task_works)
6017 task_work_run();
9831a90c 6018 cond_resched();
6c271ce2
JA
6019 continue;
6020 }
6021
6c271ce2
JA
6022 prepare_to_wait(&ctx->sqo_wait, &wait,
6023 TASK_INTERRUPTIBLE);
6024
bdcd3eab
XW
6025 /*
 6026 * While doing polled IO, before going to sleep, we need
 6027 * to check if there are new reqs added to poll_list;
 6028 * reqs may have been punted to an io worker and added
 6029 * to poll_list after our first check, hence check the
 6030 * poll_list again.
6031 */
6032 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6033 !list_empty_careful(&ctx->poll_list)) {
6034 finish_wait(&ctx->sqo_wait, &wait);
6035 continue;
6036 }
6037
6c271ce2 6038 /* Tell userspace we may need a wakeup call */
75b28aff 6039 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
0d7bae69
SB
6040 /* make sure to read SQ tail after writing flags */
6041 smp_mb();
6c271ce2 6042
fb5ccc98 6043 to_submit = io_sqring_entries(ctx);
c1edbf5f 6044 if (!to_submit || ret == -EBUSY) {
2bbcd6d3 6045 if (kthread_should_park()) {
6c271ce2
JA
6046 finish_wait(&ctx->sqo_wait, &wait);
6047 break;
6048 }
b41e9852
JA
6049 if (current->task_works) {
6050 task_work_run();
10bea96d 6051 finish_wait(&ctx->sqo_wait, &wait);
b41e9852
JA
6052 continue;
6053 }
6c271ce2
JA
6054 if (signal_pending(current))
6055 flush_signals(current);
6056 schedule();
6057 finish_wait(&ctx->sqo_wait, &wait);
6058
75b28aff 6059 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
d4ae271d 6060 ret = 0;
6c271ce2
JA
6061 continue;
6062 }
6063 finish_wait(&ctx->sqo_wait, &wait);
6064
75b28aff 6065 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6c271ce2
JA
6066 }
6067
8a4955ff 6068 mutex_lock(&ctx->uring_lock);
6b668c9b
XW
6069 if (likely(!percpu_ref_is_dying(&ctx->refs)))
6070 ret = io_submit_sqes(ctx, to_submit, NULL, -1);
8a4955ff 6071 mutex_unlock(&ctx->uring_lock);
bdcd3eab 6072 timeout = jiffies + ctx->sq_thread_idle;
6c271ce2
JA
6073 }
6074
b41e9852
JA
6075 if (current->task_works)
6076 task_work_run();
6077
bf9c2f1c 6078 io_sq_thread_drop_mm(ctx);
181e448d 6079 revert_creds(old_cred);
06058632 6080
2bbcd6d3 6081 kthread_parkme();
06058632 6082
6c271ce2
JA
6083 return 0;
6084}
6085
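/*
 * Userspace cooperation with the flag dance above: once io_sq_thread()
 * has set IORING_SQ_NEED_WAKEUP and gone to sleep, the application must
 * kick it via io_uring_enter(2). A sketch (sq_tail/sq_flags are the
 * mmap'ed fields; smp_mb() stands for a full C11 seq_cst fence):
 *
 *	smp_store_release(sq_tail, tail + 1);
 *	smp_mb();                            // flags check after tail
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 */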
bda52162
JA
6086struct io_wait_queue {
6087 struct wait_queue_entry wq;
6088 struct io_ring_ctx *ctx;
6089 unsigned to_wait;
6090 unsigned nr_timeouts;
6091};
6092
1d7bb1d5 6093static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
bda52162
JA
6094{
6095 struct io_ring_ctx *ctx = iowq->ctx;
6096
6097 /*
d195a66e 6098 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
6099 * started waiting. For timeouts, we always want to return to userspace,
6100 * regardless of event count.
6101 */
1d7bb1d5 6102 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
bda52162
JA
6103 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6104}
6105
6106static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6107 int wake_flags, void *key)
6108{
6109 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6110 wq);
6111
1d7bb1d5
JA
6112 /* use noflush == true, as we can't safely rely on locking context */
6113 if (!io_should_wake(iowq, true))
bda52162
JA
6114 return -1;
6115
6116 return autoremove_wake_function(curr, mode, wake_flags, key);
6117}
6118
2b188cc1
JA
6119/*
6120 * Wait until events become available, if we don't already have some. The
6121 * application must reap them itself, as they reside on the shared cq ring.
6122 */
6123static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
6124 const sigset_t __user *sig, size_t sigsz)
6125{
bda52162
JA
6126 struct io_wait_queue iowq = {
6127 .wq = {
6128 .private = current,
6129 .func = io_wake_function,
6130 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6131 },
6132 .ctx = ctx,
6133 .to_wait = min_events,
6134 };
75b28aff 6135 struct io_rings *rings = ctx->rings;
e9ffa5c2 6136 int ret = 0;
2b188cc1 6137
b41e9852
JA
6138 do {
6139 if (io_cqring_events(ctx, false) >= min_events)
6140 return 0;
6141 if (!current->task_works)
6142 break;
6143 task_work_run();
6144 } while (1);
2b188cc1
JA
6145
6146 if (sig) {
9e75ad5d
AB
6147#ifdef CONFIG_COMPAT
6148 if (in_compat_syscall())
6149 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 6150 sigsz);
9e75ad5d
AB
6151 else
6152#endif
b772434b 6153 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 6154
2b188cc1
JA
6155 if (ret)
6156 return ret;
6157 }
6158
bda52162 6159 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
c826bd7a 6160 trace_io_uring_cqring_wait(ctx, min_events);
bda52162
JA
6161 do {
6162 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
6163 TASK_INTERRUPTIBLE);
b41e9852
JA
6164 if (current->task_works)
6165 task_work_run();
1d7bb1d5 6166 if (io_should_wake(&iowq, false))
bda52162
JA
6167 break;
6168 schedule();
6169 if (signal_pending(current)) {
e9ffa5c2 6170 ret = -EINTR;
bda52162
JA
6171 break;
6172 }
6173 } while (1);
6174 finish_wait(&ctx->wait, &iowq.wq);
6175
e9ffa5c2 6176 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 6177
75b28aff 6178 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
6179}
6180
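/*
 * The "reap them itself" side referenced above, sketched against the
 * mmap'ed CQ ring (cq_head/cq_tail/cq_mask/cqes are the shared fields;
 * barriers again spelled kernel-style):
 *
 *	unsigned head = *cq_head;
 *	unsigned tail = smp_load_acquire(cq_tail);
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *
 *		handle(cqe->user_data, cqe->res);  // assumed consumer
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);    // return slots to kernel
 */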
6b06314c
JA
6181static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
6182{
6183#if defined(CONFIG_UNIX)
6184 if (ctx->ring_sock) {
6185 struct sock *sock = ctx->ring_sock->sk;
6186 struct sk_buff *skb;
6187
6188 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
6189 kfree_skb(skb);
6190 }
6191#else
6192 int i;
6193
65e19f54
JA
6194 for (i = 0; i < ctx->nr_user_files; i++) {
6195 struct file *file;
6196
6197 file = io_file_from_index(ctx, i);
6198 if (file)
6199 fput(file);
6200 }
6b06314c
JA
6201#endif
6202}
6203
05f3fb3c
JA
6204static void io_file_ref_kill(struct percpu_ref *ref)
6205{
6206 struct fixed_file_data *data;
6207
6208 data = container_of(ref, struct fixed_file_data, refs);
6209 complete(&data->done);
6210}
6211
6b06314c
JA
6212static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
6213{
05f3fb3c 6214 struct fixed_file_data *data = ctx->file_data;
05589553 6215 struct fixed_file_ref_node *ref_node = NULL;
65e19f54
JA
6216 unsigned nr_tables, i;
6217
05f3fb3c 6218 if (!data)
6b06314c
JA
6219 return -ENXIO;
6220
6a4d07cd 6221 spin_lock(&data->lock);
05589553
XW
6222 if (!list_empty(&data->ref_list))
6223 ref_node = list_first_entry(&data->ref_list,
6224 struct fixed_file_ref_node, node);
6a4d07cd 6225 spin_unlock(&data->lock);
05589553
XW
6226 if (ref_node)
6227 percpu_ref_kill(&ref_node->refs);
6228
6229 percpu_ref_kill(&data->refs);
6230
6231 /* wait for all refs nodes to complete */
4a38aed2 6232 flush_delayed_work(&ctx->file_put_work);
2faf852d 6233 wait_for_completion(&data->done);
05f3fb3c 6234
6b06314c 6235 __io_sqe_files_unregister(ctx);
65e19f54
JA
6236 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
6237 for (i = 0; i < nr_tables; i++)
05f3fb3c
JA
6238 kfree(data->table[i].files);
6239 kfree(data->table);
05589553
XW
6240 percpu_ref_exit(&data->refs);
6241 kfree(data);
05f3fb3c 6242 ctx->file_data = NULL;
6b06314c
JA
6243 ctx->nr_user_files = 0;
6244 return 0;
6245}
6246
6c271ce2
JA
6247static void io_sq_thread_stop(struct io_ring_ctx *ctx)
6248{
6249 if (ctx->sqo_thread) {
0f158b4c 6250 wait_for_completion(&ctx->sq_thread_comp);
2bbcd6d3
RP
6251 /*
 6252 * The park is a bit of a work-around; without it we get
6253 * warning spews on shutdown with SQPOLL set and affinity
6254 * set to a single CPU.
6255 */
06058632 6256 kthread_park(ctx->sqo_thread);
6c271ce2
JA
6257 kthread_stop(ctx->sqo_thread);
6258 ctx->sqo_thread = NULL;
6259 }
6260}
6261
6b06314c
JA
6262static void io_finish_async(struct io_ring_ctx *ctx)
6263{
6c271ce2
JA
6264 io_sq_thread_stop(ctx);
6265
561fb04a
JA
6266 if (ctx->io_wq) {
6267 io_wq_destroy(ctx->io_wq);
6268 ctx->io_wq = NULL;
6b06314c
JA
6269 }
6270}
6271
6272#if defined(CONFIG_UNIX)
6b06314c
JA
6273/*
6274 * Ensure the UNIX gc is aware of our file set, so we are certain that
6275 * the io_uring can be safely unregistered on process exit, even if we have
6276 * loops in the file referencing.
6277 */
6278static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
6279{
6280 struct sock *sk = ctx->ring_sock->sk;
6281 struct scm_fp_list *fpl;
6282 struct sk_buff *skb;
08a45173 6283 int i, nr_files;
6b06314c 6284
6b06314c
JA
6285 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
6286 if (!fpl)
6287 return -ENOMEM;
6288
6289 skb = alloc_skb(0, GFP_KERNEL);
6290 if (!skb) {
6291 kfree(fpl);
6292 return -ENOMEM;
6293 }
6294
6295 skb->sk = sk;
6b06314c 6296
08a45173 6297 nr_files = 0;
6b06314c
JA
6298 fpl->user = get_uid(ctx->user);
6299 for (i = 0; i < nr; i++) {
65e19f54
JA
6300 struct file *file = io_file_from_index(ctx, i + offset);
6301
6302 if (!file)
08a45173 6303 continue;
65e19f54 6304 fpl->fp[nr_files] = get_file(file);
08a45173
JA
6305 unix_inflight(fpl->user, fpl->fp[nr_files]);
6306 nr_files++;
6b06314c
JA
6307 }
6308
08a45173
JA
6309 if (nr_files) {
6310 fpl->max = SCM_MAX_FD;
6311 fpl->count = nr_files;
6312 UNIXCB(skb).fp = fpl;
05f3fb3c 6313 skb->destructor = unix_destruct_scm;
08a45173
JA
6314 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
6315 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 6316
08a45173
JA
6317 for (i = 0; i < nr_files; i++)
6318 fput(fpl->fp[i]);
6319 } else {
6320 kfree_skb(skb);
6321 kfree(fpl);
6322 }
6b06314c
JA
6323
6324 return 0;
6325}
6326
6327/*
6328 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
6329 * causes regular reference counting to break down. We rely on the UNIX
6330 * garbage collection to take care of this problem for us.
6331 */
6332static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6333{
6334 unsigned left, total;
6335 int ret = 0;
6336
6337 total = 0;
6338 left = ctx->nr_user_files;
6339 while (left) {
6340 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
6341
6342 ret = __io_sqe_files_scm(ctx, this_files, total);
6343 if (ret)
6344 break;
6345 left -= this_files;
6346 total += this_files;
6347 }
6348
6349 if (!ret)
6350 return 0;
6351
6352 while (total < ctx->nr_user_files) {
65e19f54
JA
6353 struct file *file = io_file_from_index(ctx, total);
6354
6355 if (file)
6356 fput(file);
6b06314c
JA
6357 total++;
6358 }
6359
6360 return ret;
6361}
6362#else
6363static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6364{
6365 return 0;
6366}
6367#endif
6368
65e19f54
JA
6369static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
6370 unsigned nr_files)
6371{
6372 int i;
6373
6374 for (i = 0; i < nr_tables; i++) {
05f3fb3c 6375 struct fixed_file_table *table = &ctx->file_data->table[i];
65e19f54
JA
6376 unsigned this_files;
6377
6378 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
6379 table->files = kcalloc(this_files, sizeof(struct file *),
6380 GFP_KERNEL);
6381 if (!table->files)
6382 break;
6383 nr_files -= this_files;
6384 }
6385
6386 if (i == nr_tables)
6387 return 0;
6388
6389 for (i = 0; i < nr_tables; i++) {
05f3fb3c 6390 struct fixed_file_table *table = &ctx->file_data->table[i];
65e19f54
JA
6391 kfree(table->files);
6392 }
6393 return 1;
6394}
6395
05f3fb3c
JA
6396static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
6397{
6398#if defined(CONFIG_UNIX)
6399 struct sock *sock = ctx->ring_sock->sk;
6400 struct sk_buff_head list, *head = &sock->sk_receive_queue;
6401 struct sk_buff *skb;
6402 int i;
6403
6404 __skb_queue_head_init(&list);
6405
6406 /*
6407 * Find the skb that holds this file in its SCM_RIGHTS. When found,
6408 * remove this entry and rearrange the file array.
6409 */
6410 skb = skb_dequeue(head);
6411 while (skb) {
6412 struct scm_fp_list *fp;
6413
6414 fp = UNIXCB(skb).fp;
6415 for (i = 0; i < fp->count; i++) {
6416 int left;
6417
6418 if (fp->fp[i] != file)
6419 continue;
6420
6421 unix_notinflight(fp->user, fp->fp[i]);
6422 left = fp->count - 1 - i;
6423 if (left) {
6424 memmove(&fp->fp[i], &fp->fp[i + 1],
6425 left * sizeof(struct file *));
6426 }
6427 fp->count--;
6428 if (!fp->count) {
6429 kfree_skb(skb);
6430 skb = NULL;
6431 } else {
6432 __skb_queue_tail(&list, skb);
6433 }
6434 fput(file);
6435 file = NULL;
6436 break;
6437 }
6438
6439 if (!file)
6440 break;
6441
6442 __skb_queue_tail(&list, skb);
6443
6444 skb = skb_dequeue(head);
6445 }
6446
6447 if (skb_peek(&list)) {
6448 spin_lock_irq(&head->lock);
6449 while ((skb = __skb_dequeue(&list)) != NULL)
6450 __skb_queue_tail(head, skb);
6451 spin_unlock_irq(&head->lock);
6452 }
6453#else
6454 fput(file);
6455#endif
6456}
6457
6458struct io_file_put {
05589553 6459 struct list_head list;
05f3fb3c 6460 struct file *file;
05f3fb3c
JA
6461};
6462
4a38aed2 6463static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
65e19f54 6464{
4a38aed2
JA
6465 struct fixed_file_data *file_data = ref_node->file_data;
6466 struct io_ring_ctx *ctx = file_data->ctx;
05f3fb3c 6467 struct io_file_put *pfile, *tmp;
05589553
XW
6468
6469 list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
6a4d07cd 6470 list_del(&pfile->list);
05589553
XW
6471 io_ring_file_put(ctx, pfile->file);
6472 kfree(pfile);
65e19f54 6473 }
05589553 6474
6a4d07cd
JA
6475 spin_lock(&file_data->lock);
6476 list_del(&ref_node->node);
6477 spin_unlock(&file_data->lock);
05589553
XW
6478
6479 percpu_ref_exit(&ref_node->refs);
6480 kfree(ref_node);
6481 percpu_ref_put(&file_data->refs);
2faf852d 6482}
65e19f54 6483
4a38aed2
JA
6484static void io_file_put_work(struct work_struct *work)
6485{
6486 struct io_ring_ctx *ctx;
6487 struct llist_node *node;
6488
6489 ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
6490 node = llist_del_all(&ctx->file_put_llist);
6491
6492 while (node) {
6493 struct fixed_file_ref_node *ref_node;
6494 struct llist_node *next = node->next;
6495
6496 ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
6497 __io_file_put_work(ref_node);
6498 node = next;
6499 }
6500}
6501
05589553 6502static void io_file_data_ref_zero(struct percpu_ref *ref)
2faf852d 6503{
05589553 6504 struct fixed_file_ref_node *ref_node;
4a38aed2
JA
6505 struct io_ring_ctx *ctx;
6506 bool first_add;
6507 int delay = HZ;
65e19f54 6508
05589553 6509 ref_node = container_of(ref, struct fixed_file_ref_node, refs);
4a38aed2 6510 ctx = ref_node->file_data->ctx;
05589553 6511
4a38aed2
JA
6512 if (percpu_ref_is_dying(&ctx->file_data->refs))
6513 delay = 0;
05589553 6514
4a38aed2
JA
6515 first_add = llist_add(&ref_node->llist, &ctx->file_put_llist);
6516 if (!delay)
6517 mod_delayed_work(system_wq, &ctx->file_put_work, 0);
6518 else if (first_add)
6519 queue_delayed_work(system_wq, &ctx->file_put_work, delay);
05f3fb3c 6520}
65e19f54 6521
05589553
XW
6522static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
6523 struct io_ring_ctx *ctx)
05f3fb3c 6524{
05589553 6525 struct fixed_file_ref_node *ref_node;
05f3fb3c 6526
05589553
XW
6527 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
6528 if (!ref_node)
6529 return ERR_PTR(-ENOMEM);
05f3fb3c 6530
05589553
XW
6531 if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
6532 0, GFP_KERNEL)) {
6533 kfree(ref_node);
6534 return ERR_PTR(-ENOMEM);
6535 }
6536 INIT_LIST_HEAD(&ref_node->node);
6537 INIT_LIST_HEAD(&ref_node->file_list);
05589553
XW
6538 ref_node->file_data = ctx->file_data;
6539 return ref_node;
05589553
XW
6540}
6541
6542static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
6543{
6544 percpu_ref_exit(&ref_node->refs);
6545 kfree(ref_node);
65e19f54
JA
6546}
6547
6b06314c
JA
6548static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
6549 unsigned nr_args)
6550{
6551 __s32 __user *fds = (__s32 __user *) arg;
65e19f54 6552 unsigned nr_tables;
05f3fb3c 6553 struct file *file;
6b06314c
JA
6554 int fd, ret = 0;
6555 unsigned i;
05589553 6556 struct fixed_file_ref_node *ref_node;
6b06314c 6557
05f3fb3c 6558 if (ctx->file_data)
6b06314c
JA
6559 return -EBUSY;
6560 if (!nr_args)
6561 return -EINVAL;
6562 if (nr_args > IORING_MAX_FIXED_FILES)
6563 return -EMFILE;
6564
05f3fb3c
JA
6565 ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
6566 if (!ctx->file_data)
6567 return -ENOMEM;
6568 ctx->file_data->ctx = ctx;
6569 init_completion(&ctx->file_data->done);
05589553 6570 INIT_LIST_HEAD(&ctx->file_data->ref_list);
f7fe9346 6571 spin_lock_init(&ctx->file_data->lock);
05f3fb3c 6572
65e19f54 6573 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
05f3fb3c
JA
6574 ctx->file_data->table = kcalloc(nr_tables,
6575 sizeof(struct fixed_file_table),
65e19f54 6576 GFP_KERNEL);
05f3fb3c
JA
6577 if (!ctx->file_data->table) {
6578 kfree(ctx->file_data);
6579 ctx->file_data = NULL;
6b06314c 6580 return -ENOMEM;
05f3fb3c
JA
6581 }
6582
05589553 6583 if (percpu_ref_init(&ctx->file_data->refs, io_file_ref_kill,
05f3fb3c
JA
6584 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
6585 kfree(ctx->file_data->table);
6586 kfree(ctx->file_data);
6587 ctx->file_data = NULL;
6b06314c 6588 return -ENOMEM;
05f3fb3c 6589 }
6b06314c 6590
65e19f54 6591 if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
05f3fb3c
JA
6592 percpu_ref_exit(&ctx->file_data->refs);
6593 kfree(ctx->file_data->table);
6594 kfree(ctx->file_data);
6595 ctx->file_data = NULL;
65e19f54
JA
6596 return -ENOMEM;
6597 }
6598
08a45173 6599 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
65e19f54
JA
6600 struct fixed_file_table *table;
6601 unsigned index;
6602
6b06314c
JA
6603 ret = -EFAULT;
6604 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
6605 break;
08a45173
JA
6606 /* allow sparse sets */
6607 if (fd == -1) {
6608 ret = 0;
6609 continue;
6610 }
6b06314c 6611
05f3fb3c 6612 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54 6613 index = i & IORING_FILE_TABLE_MASK;
05f3fb3c 6614 file = fget(fd);
6b06314c
JA
6615
6616 ret = -EBADF;
05f3fb3c 6617 if (!file)
6b06314c 6618 break;
05f3fb3c 6619
6b06314c
JA
6620 /*
6621 * Don't allow io_uring instances to be registered. If UNIX
6622 * isn't enabled, then this causes a reference cycle and this
6623 * instance can never get freed. If UNIX is enabled we'll
6624 * handle it just fine, but there's still no point in allowing
6625 * a ring fd as it doesn't support regular read/write anyway.
6626 */
05f3fb3c
JA
6627 if (file->f_op == &io_uring_fops) {
6628 fput(file);
6b06314c
JA
6629 break;
6630 }
6b06314c 6631 ret = 0;
05f3fb3c 6632 table->files[index] = file;
6b06314c
JA
6633 }
6634
6635 if (ret) {
65e19f54 6636 for (i = 0; i < ctx->nr_user_files; i++) {
65e19f54
JA
6637 file = io_file_from_index(ctx, i);
6638 if (file)
6639 fput(file);
6640 }
6641 for (i = 0; i < nr_tables; i++)
05f3fb3c 6642 kfree(ctx->file_data->table[i].files);
6b06314c 6643
05f3fb3c
JA
6644 kfree(ctx->file_data->table);
6645 kfree(ctx->file_data);
6646 ctx->file_data = NULL;
6b06314c
JA
6647 ctx->nr_user_files = 0;
6648 return ret;
6649 }
6650
6651 ret = io_sqe_files_scm(ctx);
05589553 6652 if (ret) {
6b06314c 6653 io_sqe_files_unregister(ctx);
05589553
XW
6654 return ret;
6655 }
6b06314c 6656
05589553
XW
6657 ref_node = alloc_fixed_file_ref_node(ctx);
6658 if (IS_ERR(ref_node)) {
6659 io_sqe_files_unregister(ctx);
6660 return PTR_ERR(ref_node);
6661 }
6662
6663 ctx->file_data->cur_refs = &ref_node->refs;
6a4d07cd 6664 spin_lock(&ctx->file_data->lock);
05589553 6665 list_add(&ref_node->node, &ctx->file_data->ref_list);
6a4d07cd 6666 spin_unlock(&ctx->file_data->lock);
05589553 6667 percpu_ref_get(&ctx->file_data->refs);
6b06314c
JA
6668 return ret;
6669}
6670
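/*
 * Registration from userspace, producing the fds[] array consumed
 * above (sparse slots use -1). A raw-syscall sketch:
 *
 *	#include <sys/syscall.h>
 *
 *	__s32 fds[4] = { sock0, sock1, -1, file0 };  // -1 = sparse
 *	int ret = syscall(__NR_io_uring_register, ring_fd,
 *			  IORING_REGISTER_FILES, fds, 4);
 *
 * Afterwards sqes can address slots 0-3 with IOSQE_FIXED_FILE,
 * skipping the per-request fget()/fput().
 */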
c3a31e60
JA
6671static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
6672 int index)
6673{
6674#if defined(CONFIG_UNIX)
6675 struct sock *sock = ctx->ring_sock->sk;
6676 struct sk_buff_head *head = &sock->sk_receive_queue;
6677 struct sk_buff *skb;
6678
6679 /*
6680 * See if we can merge this file into an existing skb SCM_RIGHTS
6681 * file set. If there's no room, fall back to allocating a new skb
6682 * and filling it in.
6683 */
6684 spin_lock_irq(&head->lock);
6685 skb = skb_peek(head);
6686 if (skb) {
6687 struct scm_fp_list *fpl = UNIXCB(skb).fp;
6688
6689 if (fpl->count < SCM_MAX_FD) {
6690 __skb_unlink(skb, head);
6691 spin_unlock_irq(&head->lock);
6692 fpl->fp[fpl->count] = get_file(file);
6693 unix_inflight(fpl->user, fpl->fp[fpl->count]);
6694 fpl->count++;
6695 spin_lock_irq(&head->lock);
6696 __skb_queue_head(head, skb);
6697 } else {
6698 skb = NULL;
6699 }
6700 }
6701 spin_unlock_irq(&head->lock);
6702
6703 if (skb) {
6704 fput(file);
6705 return 0;
6706 }
6707
6708 return __io_sqe_files_scm(ctx, 1, index);
6709#else
6710 return 0;
6711#endif
6712}
6713
a5318d3c 6714static int io_queue_file_removal(struct fixed_file_data *data,
05589553 6715 struct file *file)
05f3fb3c 6716{
a5318d3c 6717 struct io_file_put *pfile;
05589553
XW
6718 struct percpu_ref *refs = data->cur_refs;
6719 struct fixed_file_ref_node *ref_node;
05f3fb3c 6720
05f3fb3c 6721 pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
a5318d3c
HD
6722 if (!pfile)
6723 return -ENOMEM;
05f3fb3c 6724
05589553 6725 ref_node = container_of(refs, struct fixed_file_ref_node, refs);
05f3fb3c 6726 pfile->file = file;
05589553
XW
6727 list_add(&pfile->list, &ref_node->file_list);
6728
a5318d3c 6729 return 0;
05f3fb3c
JA
6730}
6731
6732static int __io_sqe_files_update(struct io_ring_ctx *ctx,
6733 struct io_uring_files_update *up,
6734 unsigned nr_args)
6735{
6736 struct fixed_file_data *data = ctx->file_data;
05589553 6737 struct fixed_file_ref_node *ref_node;
05f3fb3c 6738 struct file *file;
c3a31e60
JA
6739 __s32 __user *fds;
6740 int fd, i, err;
6741 __u32 done;
05589553 6742 bool needs_switch = false;
c3a31e60 6743
05f3fb3c 6744 if (check_add_overflow(up->offset, nr_args, &done))
c3a31e60
JA
6745 return -EOVERFLOW;
6746 if (done > ctx->nr_user_files)
6747 return -EINVAL;
6748
05589553
XW
6749 ref_node = alloc_fixed_file_ref_node(ctx);
6750 if (IS_ERR(ref_node))
6751 return PTR_ERR(ref_node);
6752
c3a31e60 6753 done = 0;
05f3fb3c 6754 fds = u64_to_user_ptr(up->fds);
c3a31e60 6755 while (nr_args) {
65e19f54
JA
6756 struct fixed_file_table *table;
6757 unsigned index;
6758
c3a31e60
JA
6759 err = 0;
6760 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
6761 err = -EFAULT;
6762 break;
6763 }
05f3fb3c
JA
6764 i = array_index_nospec(up->offset, ctx->nr_user_files);
6765 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54
JA
6766 index = i & IORING_FILE_TABLE_MASK;
6767 if (table->files[index]) {
05f3fb3c 6768 file = io_file_from_index(ctx, index);
a5318d3c
HD
6769 err = io_queue_file_removal(data, file);
6770 if (err)
6771 break;
65e19f54 6772 table->files[index] = NULL;
05589553 6773 needs_switch = true;
c3a31e60
JA
6774 }
6775 if (fd != -1) {
c3a31e60
JA
6776 file = fget(fd);
6777 if (!file) {
6778 err = -EBADF;
6779 break;
6780 }
6781 /*
6782 * Don't allow io_uring instances to be registered. If
6783 * UNIX isn't enabled, then this causes a reference
6784 * cycle and this instance can never get freed. If UNIX
6785 * is enabled we'll handle it just fine, but there's
6786 * still no point in allowing a ring fd as it doesn't
6787 * support regular read/write anyway.
6788 */
6789 if (file->f_op == &io_uring_fops) {
6790 fput(file);
6791 err = -EBADF;
6792 break;
6793 }
65e19f54 6794 table->files[index] = file;
c3a31e60
JA
6795 err = io_sqe_file_register(ctx, file, i);
6796 if (err)
6797 break;
6798 }
6799 nr_args--;
6800 done++;
05f3fb3c
JA
6801 up->offset++;
6802 }
6803
05589553
XW
6804 if (needs_switch) {
6805 percpu_ref_kill(data->cur_refs);
6a4d07cd 6806 spin_lock(&data->lock);
05589553
XW
6807 list_add(&ref_node->node, &data->ref_list);
6808 data->cur_refs = &ref_node->refs;
6a4d07cd 6809 spin_unlock(&data->lock);
05589553
XW
6810 percpu_ref_get(&ctx->file_data->refs);
6811 } else
6812 destroy_fixed_file_ref_node(ref_node);
c3a31e60
JA
6813
6814 return done ? done : err;
6815}
05589553 6816
05f3fb3c
JA
6817static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
6818 unsigned nr_args)
6819{
6820 struct io_uring_files_update up;
6821
6822 if (!ctx->file_data)
6823 return -ENXIO;
6824 if (!nr_args)
6825 return -EINVAL;
6826 if (copy_from_user(&up, arg, sizeof(up)))
6827 return -EFAULT;
6828 if (up.resv)
6829 return -EINVAL;
6830
6831 return __io_sqe_files_update(ctx, &up, nr_args);
6832}
c3a31e60 6833
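/*
 * The update path above is driven by IORING_REGISTER_FILES_UPDATE.
 * A sketch replacing slot 2 and clearing slot 3 (-1 removes a file):
 *
 *	__s32 fds[2] = { new_fd, -1 };
 *	struct io_uring_files_update up = {
 *		.offset = 2,
 *		.fds    = (unsigned long) fds,
 *	};
 *
 *	int ret = syscall(__NR_io_uring_register, ring_fd,
 *			  IORING_REGISTER_FILES_UPDATE, &up, 2);
 *
 * On success the return value is the number of slots updated, the
 * 'done' count above.
 */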
e9fd9396 6834static void io_free_work(struct io_wq_work *work)
7d723065
JA
6835{
6836 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6837
e9fd9396 6838 /* Consider that io_steal_work() relies on this ref */
7d723065
JA
6839 io_put_req(req);
6840}
6841
24369c2e
PB
6842static int io_init_wq_offload(struct io_ring_ctx *ctx,
6843 struct io_uring_params *p)
6844{
6845 struct io_wq_data data;
6846 struct fd f;
6847 struct io_ring_ctx *ctx_attach;
6848 unsigned int concurrency;
6849 int ret = 0;
6850
6851 data.user = ctx->user;
e9fd9396 6852 data.free_work = io_free_work;
f5fa38c5 6853 data.do_work = io_wq_submit_work;
24369c2e
PB
6854
6855 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
6856 /* Do QD, or 4 * CPUS, whatever is smallest */
6857 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
6858
6859 ctx->io_wq = io_wq_create(concurrency, &data);
6860 if (IS_ERR(ctx->io_wq)) {
6861 ret = PTR_ERR(ctx->io_wq);
6862 ctx->io_wq = NULL;
6863 }
6864 return ret;
6865 }
6866
6867 f = fdget(p->wq_fd);
6868 if (!f.file)
6869 return -EBADF;
6870
6871 if (f.file->f_op != &io_uring_fops) {
6872 ret = -EINVAL;
6873 goto out_fput;
6874 }
6875
6876 ctx_attach = f.file->private_data;
6877 /* @io_wq is protected by holding the fd */
6878 if (!io_wq_get(ctx_attach->io_wq, &data)) {
6879 ret = -EINVAL;
6880 goto out_fput;
6881 }
6882
6883 ctx->io_wq = ctx_attach->io_wq;
6884out_fput:
6885 fdput(f);
6886 return ret;
6887}
6888
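/*
 * Sharing one io-wq backend between rings via IORING_SETUP_ATTACH_WQ,
 * as handled above. A sketch creating a second ring that attaches to
 * the first ring's workers:
 *
 *	struct io_uring_params p = { 0 };
 *
 *	p.flags = IORING_SETUP_ATTACH_WQ;
 *	p.wq_fd = first_ring_fd;             // must itself be a ring
 *	int ring2_fd = syscall(__NR_io_uring_setup, 64, &p);
 *
 * If wq_fd is not an io_uring fd, or its io_wq cannot be grabbed,
 * setup fails with EINVAL per the checks above.
 */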
6c271ce2
JA
6889static int io_sq_offload_start(struct io_ring_ctx *ctx,
6890 struct io_uring_params *p)
2b188cc1
JA
6891{
6892 int ret;
6893
6894 mmgrab(current->mm);
6895 ctx->sqo_mm = current->mm;
6896
6c271ce2 6897 if (ctx->flags & IORING_SETUP_SQPOLL) {
3ec482d1
JA
6898 ret = -EPERM;
6899 if (!capable(CAP_SYS_ADMIN))
6900 goto err;
6901
917257da
JA
6902 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
6903 if (!ctx->sq_thread_idle)
6904 ctx->sq_thread_idle = HZ;
6905
6c271ce2 6906 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 6907 int cpu = p->sq_thread_cpu;
6c271ce2 6908
917257da 6909 ret = -EINVAL;
44a9bd18
JA
6910 if (cpu >= nr_cpu_ids)
6911 goto err;
7889f44d 6912 if (!cpu_online(cpu))
917257da
JA
6913 goto err;
6914
6c271ce2
JA
6915 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
6916 ctx, cpu,
6917 "io_uring-sq");
6918 } else {
6919 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
6920 "io_uring-sq");
6921 }
6922 if (IS_ERR(ctx->sqo_thread)) {
6923 ret = PTR_ERR(ctx->sqo_thread);
6924 ctx->sqo_thread = NULL;
6925 goto err;
6926 }
6927 wake_up_process(ctx->sqo_thread);
6928 } else if (p->flags & IORING_SETUP_SQ_AFF) {
6929 /* Can't have SQ_AFF without SQPOLL */
6930 ret = -EINVAL;
6931 goto err;
6932 }
6933
24369c2e
PB
6934 ret = io_init_wq_offload(ctx, p);
6935 if (ret)
2b188cc1 6936 goto err;
2b188cc1
JA
6937
6938 return 0;
6939err:
54a91f3b 6940 io_finish_async(ctx);
2b188cc1
JA
6941 mmdrop(ctx->sqo_mm);
6942 ctx->sqo_mm = NULL;
6943 return ret;
6944}
6945
6946static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
6947{
6948 atomic_long_sub(nr_pages, &user->locked_vm);
6949}
6950
6951static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
6952{
6953 unsigned long page_limit, cur_pages, new_pages;
6954
6955 /* Don't allow more pages than we can safely lock */
6956 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
6957
6958 do {
6959 cur_pages = atomic_long_read(&user->locked_vm);
6960 new_pages = cur_pages + nr_pages;
6961 if (new_pages > page_limit)
6962 return -ENOMEM;
6963 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
6964 new_pages) != cur_pages);
6965
6966 return 0;
6967}
6968
6969static void io_mem_free(void *ptr)
6970{
52e04ef4
MR
6971 struct page *page;
6972
6973 if (!ptr)
6974 return;
2b188cc1 6975
52e04ef4 6976 page = virt_to_head_page(ptr);
2b188cc1
JA
6977 if (put_page_testzero(page))
6978 free_compound_page(page);
6979}
6980
6981static void *io_mem_alloc(size_t size)
6982{
6983 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
6984 __GFP_NORETRY;
6985
6986 return (void *) __get_free_pages(gfp_flags, get_order(size));
6987}
6988
75b28aff
HV
6989static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
6990 size_t *sq_offset)
6991{
6992 struct io_rings *rings;
6993 size_t off, sq_array_size;
6994
6995 off = struct_size(rings, cqes, cq_entries);
6996 if (off == SIZE_MAX)
6997 return SIZE_MAX;
6998
6999#ifdef CONFIG_SMP
7000 off = ALIGN(off, SMP_CACHE_BYTES);
7001 if (off == 0)
7002 return SIZE_MAX;
7003#endif
7004
7005 sq_array_size = array_size(sizeof(u32), sq_entries);
7006 if (sq_array_size == SIZE_MAX)
7007 return SIZE_MAX;
7008
 7009 if (sq_offset)
 7010 *sq_offset = off;
 7011
 7012 if (check_add_overflow(off, sq_array_size, &off))
 7013 return SIZE_MAX;
7014
7015 return off;
7016}
7017
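/*
 * A worked example of the layout computed above, assuming
 * sizeof(struct io_uring_cqe) == 16 and SMP_CACHE_BYTES == 64, with
 * sq_entries = 128 and cq_entries = 256:
 *
 *	off           = header + 256 * 16            // cqes
 *	off           = ALIGN(off, 64)               // *sq_offset
 *	sq_array_size = 128 * sizeof(u32) = 512
 *	total         = off + 512
 *
 * so the sq index array sits directly behind the cqes in the same
 * mapping, while the 64-byte sqes are a separate allocation.
 */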
2b188cc1
JA
7018static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
7019{
75b28aff 7020 size_t pages;
2b188cc1 7021
75b28aff
HV
7022 pages = (size_t)1 << get_order(
7023 rings_size(sq_entries, cq_entries, NULL));
7024 pages += (size_t)1 << get_order(
7025 array_size(sizeof(struct io_uring_sqe), sq_entries));
2b188cc1 7026
75b28aff 7027 return pages;
2b188cc1
JA
7028}
7029
edafccee
JA
7030static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
7031{
7032 int i, j;
7033
7034 if (!ctx->user_bufs)
7035 return -ENXIO;
7036
7037 for (i = 0; i < ctx->nr_user_bufs; i++) {
7038 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
7039
7040 for (j = 0; j < imu->nr_bvecs; j++)
f1f6a7dd 7041 unpin_user_page(imu->bvec[j].bv_page);
edafccee
JA
7042
7043 if (ctx->account_mem)
7044 io_unaccount_mem(ctx->user, imu->nr_bvecs);
d4ef6475 7045 kvfree(imu->bvec);
edafccee
JA
7046 imu->nr_bvecs = 0;
7047 }
7048
7049 kfree(ctx->user_bufs);
7050 ctx->user_bufs = NULL;
7051 ctx->nr_user_bufs = 0;
7052 return 0;
7053}
7054
7055static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
7056 void __user *arg, unsigned index)
7057{
7058 struct iovec __user *src;
7059
7060#ifdef CONFIG_COMPAT
7061 if (ctx->compat) {
7062 struct compat_iovec __user *ciovs;
7063 struct compat_iovec ciov;
7064
7065 ciovs = (struct compat_iovec __user *) arg;
7066 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
7067 return -EFAULT;
7068
d55e5f5b 7069 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
7070 dst->iov_len = ciov.iov_len;
7071 return 0;
7072 }
7073#endif
7074 src = (struct iovec __user *) arg;
7075 if (copy_from_user(dst, &src[index], sizeof(*dst)))
7076 return -EFAULT;
7077 return 0;
7078}
7079
7080static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
7081 unsigned nr_args)
7082{
7083 struct vm_area_struct **vmas = NULL;
7084 struct page **pages = NULL;
7085 int i, j, got_pages = 0;
7086 int ret = -EINVAL;
7087
7088 if (ctx->user_bufs)
7089 return -EBUSY;
7090 if (!nr_args || nr_args > UIO_MAXIOV)
7091 return -EINVAL;
7092
7093 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
7094 GFP_KERNEL);
7095 if (!ctx->user_bufs)
7096 return -ENOMEM;
7097
7098 for (i = 0; i < nr_args; i++) {
7099 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
7100 unsigned long off, start, end, ubuf;
7101 int pret, nr_pages;
7102 struct iovec iov;
7103 size_t size;
7104
7105 ret = io_copy_iov(ctx, &iov, arg, i);
7106 if (ret)
a278682d 7107 goto err;
edafccee
JA
7108
7109 /*
7110 * Don't impose further limits on the size and buffer
7111 * constraints here, we'll -EINVAL later when IO is
7112 * submitted if they are wrong.
7113 */
7114 ret = -EFAULT;
7115 if (!iov.iov_base || !iov.iov_len)
7116 goto err;
7117
7118 /* arbitrary limit, but we need something */
7119 if (iov.iov_len > SZ_1G)
7120 goto err;
7121
7122 ubuf = (unsigned long) iov.iov_base;
7123 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
7124 start = ubuf >> PAGE_SHIFT;
7125 nr_pages = end - start;
7126
7127 if (ctx->account_mem) {
7128 ret = io_account_mem(ctx->user, nr_pages);
7129 if (ret)
7130 goto err;
7131 }
7132
7133 ret = 0;
7134 if (!pages || nr_pages > got_pages) {
a8c73c1a
DE
7135 kvfree(vmas);
7136 kvfree(pages);
d4ef6475 7137 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
edafccee 7138 GFP_KERNEL);
d4ef6475 7139 vmas = kvmalloc_array(nr_pages,
edafccee
JA
7140 sizeof(struct vm_area_struct *),
7141 GFP_KERNEL);
7142 if (!pages || !vmas) {
7143 ret = -ENOMEM;
7144 if (ctx->account_mem)
7145 io_unaccount_mem(ctx->user, nr_pages);
7146 goto err;
7147 }
7148 got_pages = nr_pages;
7149 }
7150
d4ef6475 7151 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
edafccee
JA
7152 GFP_KERNEL);
7153 ret = -ENOMEM;
7154 if (!imu->bvec) {
7155 if (ctx->account_mem)
7156 io_unaccount_mem(ctx->user, nr_pages);
7157 goto err;
7158 }
7159
7160 ret = 0;
d8ed45c5 7161 mmap_read_lock(current->mm);
2113b05d 7162 pret = pin_user_pages(ubuf, nr_pages,
932f4a63
IW
7163 FOLL_WRITE | FOLL_LONGTERM,
7164 pages, vmas);
edafccee
JA
7165 if (pret == nr_pages) {
7166 /* don't support file backed memory */
7167 for (j = 0; j < nr_pages; j++) {
7168 struct vm_area_struct *vma = vmas[j];
7169
7170 if (vma->vm_file &&
7171 !is_file_hugepages(vma->vm_file)) {
7172 ret = -EOPNOTSUPP;
7173 break;
7174 }
7175 }
7176 } else {
7177 ret = pret < 0 ? pret : -EFAULT;
7178 }
d8ed45c5 7179 mmap_read_unlock(current->mm);
edafccee
JA
7180 if (ret) {
7181 /*
7182 * if we did partial map, or found file backed vmas,
7183 * release any pages we did get
7184 */
27c4d3a3 7185 if (pret > 0)
f1f6a7dd 7186 unpin_user_pages(pages, pret);
edafccee
JA
7187 if (ctx->account_mem)
7188 io_unaccount_mem(ctx->user, nr_pages);
d4ef6475 7189 kvfree(imu->bvec);
edafccee
JA
7190 goto err;
7191 }
7192
7193 off = ubuf & ~PAGE_MASK;
7194 size = iov.iov_len;
7195 for (j = 0; j < nr_pages; j++) {
7196 size_t vec_len;
7197
7198 vec_len = min_t(size_t, size, PAGE_SIZE - off);
7199 imu->bvec[j].bv_page = pages[j];
7200 imu->bvec[j].bv_len = vec_len;
7201 imu->bvec[j].bv_offset = off;
7202 off = 0;
7203 size -= vec_len;
7204 }
7205 /* store original address for later verification */
7206 imu->ubuf = ubuf;
7207 imu->len = iov.iov_len;
7208 imu->nr_bvecs = nr_pages;
7209
7210 ctx->nr_user_bufs++;
7211 }
d4ef6475
MR
7212 kvfree(pages);
7213 kvfree(vmas);
edafccee
JA
7214 return 0;
7215err:
d4ef6475
MR
7216 kvfree(pages);
7217 kvfree(vmas);
edafccee
JA
7218 io_sqe_buffer_unregister(ctx);
7219 return ret;
7220}
7221
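/*
 * The iovecs pinned above arrive via IORING_REGISTER_BUFFERS. A
 * sketch registering two fixed buffers, each then addressable by
 * buf_index in IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED:
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = buf0, .iov_len = 4096 },
 *		{ .iov_base = buf1, .iov_len = 65536 },
 *	};
 *	int ret = syscall(__NR_io_uring_register, ring_fd,
 *			  IORING_REGISTER_BUFFERS, iov, 2);
 *
 * The pages stay pinned, and accounted against RLIMIT_MEMLOCK via
 * io_account_mem() above, until unregister or ring teardown.
 */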
9b402849
JA
7222static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
7223{
7224 __s32 __user *fds = arg;
7225 int fd;
7226
7227 if (ctx->cq_ev_fd)
7228 return -EBUSY;
7229
7230 if (copy_from_user(&fd, fds, sizeof(*fds)))
7231 return -EFAULT;
7232
7233 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
7234 if (IS_ERR(ctx->cq_ev_fd)) {
7235 int ret = PTR_ERR(ctx->cq_ev_fd);
7236 ctx->cq_ev_fd = NULL;
7237 return ret;
7238 }
7239
7240 return 0;
7241}
7242
7243static int io_eventfd_unregister(struct io_ring_ctx *ctx)
7244{
7245 if (ctx->cq_ev_fd) {
7246 eventfd_ctx_put(ctx->cq_ev_fd);
7247 ctx->cq_ev_fd = NULL;
7248 return 0;
7249 }
7250
7251 return -ENXIO;
7252}
7253
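/*
 * Completion notification via the registered eventfd. A sketch wiring
 * a ring into a poll/epoll loop:
 *
 *	#include <sys/eventfd.h>
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	int ret = syscall(__NR_io_uring_register, ring_fd,
 *			  IORING_REGISTER_EVENTFD, &efd, 1);
 *
 * Each posted CQE then signals efd, so ring readiness can be
 * multiplexed with other fds without blocking in io_uring_enter(2).
 */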
5a2e745d
JA
7254static int __io_destroy_buffers(int id, void *p, void *data)
7255{
7256 struct io_ring_ctx *ctx = data;
7257 struct io_buffer *buf = p;
7258
067524e9 7259 __io_remove_buffers(ctx, buf, id, -1U);
5a2e745d
JA
7260 return 0;
7261}
7262
7263static void io_destroy_buffers(struct io_ring_ctx *ctx)
7264{
7265 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
7266 idr_destroy(&ctx->io_buffer_idr);
7267}
7268
2b188cc1
JA
7269static void io_ring_ctx_free(struct io_ring_ctx *ctx)
7270{
6b06314c 7271 io_finish_async(ctx);
2b188cc1
JA
7272 if (ctx->sqo_mm)
7273 mmdrop(ctx->sqo_mm);
def596e9
JA
7274
7275 io_iopoll_reap_events(ctx);
edafccee 7276 io_sqe_buffer_unregister(ctx);
6b06314c 7277 io_sqe_files_unregister(ctx);
9b402849 7278 io_eventfd_unregister(ctx);
5a2e745d 7279 io_destroy_buffers(ctx);
41726c9a 7280 idr_destroy(&ctx->personality_idr);
def596e9 7281
2b188cc1 7282#if defined(CONFIG_UNIX)
355e8d26
EB
7283 if (ctx->ring_sock) {
7284 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 7285 sock_release(ctx->ring_sock);
355e8d26 7286 }
2b188cc1
JA
7287#endif
7288
75b28aff 7289 io_mem_free(ctx->rings);
2b188cc1 7290 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
7291
7292 percpu_ref_exit(&ctx->refs);
7293 if (ctx->account_mem)
7294 io_unaccount_mem(ctx->user,
7295 ring_pages(ctx->sq_entries, ctx->cq_entries));
7296 free_uid(ctx->user);
181e448d 7297 put_cred(ctx->creds);
78076bb6 7298 kfree(ctx->cancel_hash);
0ddf92e8 7299 kmem_cache_free(req_cachep, ctx->fallback_req);
2b188cc1
JA
7300 kfree(ctx);
7301}
7302
7303static __poll_t io_uring_poll(struct file *file, poll_table *wait)
7304{
7305 struct io_ring_ctx *ctx = file->private_data;
7306 __poll_t mask = 0;
7307
7308 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
7309 /*
7310 * synchronizes with barrier from wq_has_sleeper call in
7311 * io_commit_cqring
7312 */
2b188cc1 7313 smp_rmb();
75b28aff
HV
7314 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
7315 ctx->rings->sq_ring_entries)
2b188cc1 7316 mask |= EPOLLOUT | EPOLLWRNORM;
63e5d81f 7317 if (io_cqring_events(ctx, false))
2b188cc1
JA
7318 mask |= EPOLLIN | EPOLLRDNORM;
7319
7320 return mask;
7321}
7322
7323static int io_uring_fasync(int fd, struct file *file, int on)
7324{
7325 struct io_ring_ctx *ctx = file->private_data;
7326
7327 return fasync_helper(fd, file, on, &ctx->cq_fasync);
7328}

static int io_remove_personalities(int id, void *p, void *data)
{
        struct io_ring_ctx *ctx = data;
        const struct cred *cred;

        cred = idr_remove(&ctx->personality_idr, id);
        if (cred)
                put_cred(cred);
        return 0;
}

static void io_ring_exit_work(struct work_struct *work)
{
        struct io_ring_ctx *ctx;

        ctx = container_of(work, struct io_ring_ctx, exit_work);
        if (ctx->rings)
                io_cqring_overflow_flush(ctx, true);

        wait_for_completion(&ctx->ref_comp);
        io_ring_ctx_free(ctx);
}

static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
        mutex_lock(&ctx->uring_lock);
        percpu_ref_kill(&ctx->refs);
        mutex_unlock(&ctx->uring_lock);

        io_kill_timeouts(ctx);
        io_poll_remove_all(ctx);

        if (ctx->io_wq)
                io_wq_cancel_all(ctx->io_wq);

        io_iopoll_reap_events(ctx);
        /* if we failed setting up the ctx, we might not have any rings */
        if (ctx->rings)
                io_cqring_overflow_flush(ctx, true);
        idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
        INIT_WORK(&ctx->exit_work, io_ring_exit_work);
        queue_work(system_wq, &ctx->exit_work);
}

static int io_uring_release(struct inode *inode, struct file *file)
{
        struct io_ring_ctx *ctx = file->private_data;

        file->private_data = NULL;
        io_ring_ctx_wait_and_kill(ctx);
        return 0;
}

static bool io_wq_files_match(struct io_wq_work *work, void *data)
{
        struct files_struct *files = data;

        return work->files == files;
}

static void io_uring_cancel_files(struct io_ring_ctx *ctx,
                                  struct files_struct *files)
{
        if (list_empty_careful(&ctx->inflight_list))
                return;

        /* cancel all at once, should be faster than doing it one by one */
        io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);

        while (!list_empty_careful(&ctx->inflight_list)) {
                struct io_kiocb *cancel_req = NULL, *req;
                DEFINE_WAIT(wait);

                spin_lock_irq(&ctx->inflight_lock);
                list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
                        if (req->work.files != files)
                                continue;
                        /* req is being completed, ignore */
                        if (!refcount_inc_not_zero(&req->refs))
                                continue;
                        cancel_req = req;
                        break;
                }
                if (cancel_req)
                        prepare_to_wait(&ctx->inflight_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&ctx->inflight_lock);

                /* We need to keep going until we don't find a matching req */
                if (!cancel_req)
                        break;

                if (cancel_req->flags & REQ_F_OVERFLOW) {
                        spin_lock_irq(&ctx->completion_lock);
                        list_del(&cancel_req->list);
                        cancel_req->flags &= ~REQ_F_OVERFLOW;
                        if (list_empty(&ctx->cq_overflow_list)) {
                                clear_bit(0, &ctx->sq_check_overflow);
                                clear_bit(0, &ctx->cq_check_overflow);
                        }
                        spin_unlock_irq(&ctx->completion_lock);

                        WRITE_ONCE(ctx->rings->cq_overflow,
                                atomic_inc_return(&ctx->cached_cq_overflow));

                        /*
                         * Put inflight ref and overflow ref. If that's
                         * all we had, then we're done with this request.
                         */
                        if (refcount_sub_and_test(2, &cancel_req->refs)) {
                                io_free_req(cancel_req);
                                finish_wait(&ctx->inflight_wait, &wait);
                                continue;
                        }
                } else {
                        io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
                        io_put_req(cancel_req);
                }

                schedule();
                finish_wait(&ctx->inflight_wait, &wait);
        }
}

static bool io_cancel_pid_cb(struct io_wq_work *work, void *data)
{
        pid_t pid = (pid_t) (unsigned long) data;

        return work->task_pid == pid;
}

static int io_uring_flush(struct file *file, void *data)
{
        struct io_ring_ctx *ctx = file->private_data;

        io_uring_cancel_files(ctx, data);

        /*
         * If the task is going away, cancel work it may have pending
         */
        if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
                void *data = (void *) (unsigned long)task_pid_vnr(current);

                io_wq_cancel_cb(ctx->io_wq, io_cancel_pid_cb, data, true);
        }

        return 0;
}

static void *io_uring_validate_mmap_request(struct file *file,
                                            loff_t pgoff, size_t sz)
{
        struct io_ring_ctx *ctx = file->private_data;
        loff_t offset = pgoff << PAGE_SHIFT;
        struct page *page;
        void *ptr;

        switch (offset) {
        case IORING_OFF_SQ_RING:
        case IORING_OFF_CQ_RING:
                ptr = ctx->rings;
                break;
        case IORING_OFF_SQES:
                ptr = ctx->sq_sqes;
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        page = virt_to_head_page(ptr);
        if (sz > page_size(page))
                return ERR_PTR(-EINVAL);

        return ptr;
}

#ifdef CONFIG_MMU

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
        size_t sz = vma->vm_end - vma->vm_start;
        unsigned long pfn;
        void *ptr;

        ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);

        pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
        return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}

#else /* !CONFIG_MMU */

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
        return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
}

static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len,
        unsigned long pgoff, unsigned long flags)
{
        void *ptr;

        ptr = io_uring_validate_mmap_request(file, pgoff, len);
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);

        return (unsigned long) ptr;
}

#endif /* !CONFIG_MMU */

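/*
 * Example (illustrative sketch, not part of this file): how an
 * application maps the regions validated above. Assumes <ring_fd> and a
 * struct io_uring_params <p> filled in by io_uring_setup(); error
 * handling omitted.
 *
 *        #include <sys/mman.h>
 *        #include <linux/io_uring.h>
 *
 *        size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *        size_t cq_sz = p.cq_off.cqes +
 *                       p.cq_entries * sizeof(struct io_uring_cqe);
 *
 *        void *sq = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED | MAP_POPULATE, ring_fd,
 *                        IORING_OFF_SQ_RING);
 *        void *cq = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED | MAP_POPULATE, ring_fd,
 *                        IORING_OFF_CQ_RING);
 *        struct io_uring_sqe *sqes = mmap(NULL,
 *                        p.sq_entries * sizeof(struct io_uring_sqe),
 *                        PROT_READ | PROT_WRITE,
 *                        MAP_SHARED | MAP_POPULATE, ring_fd,
 *                        IORING_OFF_SQES);
 *
 * With IORING_FEAT_SINGLE_MMAP (see io_uring_create() below), the SQ and
 * CQ rings may share a single mapping of max(sq_sz, cq_sz).
 */
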
SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                u32, min_complete, u32, flags, const sigset_t __user *, sig,
                size_t, sigsz)
{
        struct io_ring_ctx *ctx;
        long ret = -EBADF;
        int submitted = 0;
        struct fd f;

        if (current->task_works)
                task_work_run();

        if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
                return -EINVAL;

        f = fdget(fd);
        if (!f.file)
                return -EBADF;

        ret = -EOPNOTSUPP;
        if (f.file->f_op != &io_uring_fops)
                goto out_fput;

        ret = -ENXIO;
        ctx = f.file->private_data;
        if (!percpu_ref_tryget(&ctx->refs))
                goto out_fput;

        /*
         * For SQ polling, the thread will do all submissions and completions.
         * Just return the requested submit count, and wake the thread if
         * we were asked to.
         */
        ret = 0;
        if (ctx->flags & IORING_SETUP_SQPOLL) {
                if (!list_empty_careful(&ctx->cq_overflow_list))
                        io_cqring_overflow_flush(ctx, false);
                if (flags & IORING_ENTER_SQ_WAKEUP)
                        wake_up(&ctx->sqo_wait);
                submitted = to_submit;
        } else if (to_submit) {
                mutex_lock(&ctx->uring_lock);
                submitted = io_submit_sqes(ctx, to_submit, f.file, fd);
                mutex_unlock(&ctx->uring_lock);

                if (submitted != to_submit)
                        goto out;
        }
        if (flags & IORING_ENTER_GETEVENTS) {
                unsigned nr_events = 0;

                min_complete = min(min_complete, ctx->cq_entries);

                /*
                 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
                 * space applications don't need to do io completion events
                 * polling again, they can rely on io_sq_thread to do polling
                 * work, which can reduce cpu usage and uring_lock contention.
                 */
                if (ctx->flags & IORING_SETUP_IOPOLL &&
                    !(ctx->flags & IORING_SETUP_SQPOLL)) {
                        ret = io_iopoll_check(ctx, &nr_events, min_complete);
                } else {
                        ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
                }
        }

out:
        percpu_ref_put(&ctx->refs);
out_fput:
        fdput(f);
        return submitted ? submitted : ret;
}

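/*
 * Example (illustrative sketch, not part of this file): calling the
 * syscall above from userspace. glibc provides no wrapper, so syscall(2)
 * is used. Assumes <ring_fd>, and that <n> SQEs have been written and
 * the SQ tail published; error handling omitted.
 *
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/io_uring.h>
 *
 *        // submit n SQEs and block until at least one CQE is available
 *        int ret = syscall(__NR_io_uring_enter, ring_fd, n, 1,
 *                          IORING_ENTER_GETEVENTS, NULL, 0);
 */
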
#ifdef CONFIG_PROC_FS
static int io_uring_show_cred(int id, void *p, void *data)
{
        const struct cred *cred = p;
        struct seq_file *m = data;
        struct user_namespace *uns = seq_user_ns(m);
        struct group_info *gi;
        kernel_cap_t cap;
        unsigned __capi;
        int g;

        seq_printf(m, "%5d\n", id);
        seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
        seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
        seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
        seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
        seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
        seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
        seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
        seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
        seq_puts(m, "\n\tGroups:\t");
        gi = cred->group_info;
        for (g = 0; g < gi->ngroups; g++) {
                seq_put_decimal_ull(m, g ? " " : "",
                                    from_kgid_munged(uns, gi->gid[g]));
        }
        seq_puts(m, "\n\tCapEff:\t");
        cap = cred->cap_effective;
        CAP_FOR_EACH_U32(__capi)
                seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
        seq_putc(m, '\n');
        return 0;
}

static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
        int i;

        mutex_lock(&ctx->uring_lock);
        seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
        for (i = 0; i < ctx->nr_user_files; i++) {
                struct fixed_file_table *table;
                struct file *f;

                table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
                f = table->files[i & IORING_FILE_TABLE_MASK];
                if (f)
                        seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
                else
                        seq_printf(m, "%5u: <none>\n", i);
        }
        seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
        for (i = 0; i < ctx->nr_user_bufs; i++) {
                struct io_mapped_ubuf *buf = &ctx->user_bufs[i];

                seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
                           (unsigned int) buf->len);
        }
        if (!idr_is_empty(&ctx->personality_idr)) {
                seq_printf(m, "Personalities:\n");
                idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
        }
        seq_printf(m, "PollList:\n");
        spin_lock_irq(&ctx->completion_lock);
        for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
                struct hlist_head *list = &ctx->cancel_hash[i];
                struct io_kiocb *req;

                hlist_for_each_entry(req, list, hash_node)
                        seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
                                        req->task->task_works != NULL);
        }
        spin_unlock_irq(&ctx->completion_lock);
        mutex_unlock(&ctx->uring_lock);
}

static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
        struct io_ring_ctx *ctx = f->private_data;

        if (percpu_ref_tryget(&ctx->refs)) {
                __io_uring_show_fdinfo(ctx, m);
                percpu_ref_put(&ctx->refs);
        }
}
#endif

static const struct file_operations io_uring_fops = {
        .release        = io_uring_release,
        .flush          = io_uring_flush,
        .mmap           = io_uring_mmap,
#ifndef CONFIG_MMU
        .get_unmapped_area = io_uring_nommu_get_unmapped_area,
        .mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
        .poll           = io_uring_poll,
        .fasync         = io_uring_fasync,
#ifdef CONFIG_PROC_FS
        .show_fdinfo    = io_uring_show_fdinfo,
#endif
};

static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
                                  struct io_uring_params *p)
{
        struct io_rings *rings;
        size_t size, sq_array_offset;

        size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
        if (size == SIZE_MAX)
                return -EOVERFLOW;

        rings = io_mem_alloc(size);
        if (!rings)
                return -ENOMEM;

        ctx->rings = rings;
        ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
        rings->sq_ring_mask = p->sq_entries - 1;
        rings->cq_ring_mask = p->cq_entries - 1;
        rings->sq_ring_entries = p->sq_entries;
        rings->cq_ring_entries = p->cq_entries;
        ctx->sq_mask = rings->sq_ring_mask;
        ctx->cq_mask = rings->cq_ring_mask;
        ctx->sq_entries = rings->sq_ring_entries;
        ctx->cq_entries = rings->cq_ring_entries;

        size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
        if (size == SIZE_MAX) {
                io_mem_free(ctx->rings);
                ctx->rings = NULL;
                return -EOVERFLOW;
        }

        ctx->sq_sqes = io_mem_alloc(size);
        if (!ctx->sq_sqes) {
                io_mem_free(ctx->rings);
                ctx->rings = NULL;
                return -ENOMEM;
        }

        return 0;
}

/*
 * Allocate an anonymous fd; this is what constitutes the
 * application-visible backing of an io_uring instance. The application
 * mmaps this fd to gain access to the SQ/CQ ring details. If UNIX
 * sockets are enabled, we have to tie this fd to a socket for file
 * garbage collection purposes.
 */
static int io_uring_get_fd(struct io_ring_ctx *ctx)
{
        struct file *file;
        int ret;

#if defined(CONFIG_UNIX)
        ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
                               &ctx->ring_sock);
        if (ret)
                return ret;
#endif

        ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
        if (ret < 0)
                goto err;

        file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
                                        O_RDWR | O_CLOEXEC);
        if (IS_ERR(file)) {
                put_unused_fd(ret);
                ret = PTR_ERR(file);
                goto err;
        }

#if defined(CONFIG_UNIX)
        ctx->ring_sock->file = file;
#endif
        fd_install(ret, file);
        return ret;
err:
#if defined(CONFIG_UNIX)
        sock_release(ctx->ring_sock);
        ctx->ring_sock = NULL;
#endif
        return ret;
}

static int io_uring_create(unsigned entries, struct io_uring_params *p,
                           struct io_uring_params __user *params)
{
        struct user_struct *user = NULL;
        struct io_ring_ctx *ctx;
        bool account_mem;
        int ret;

        if (!entries)
                return -EINVAL;
        if (entries > IORING_MAX_ENTRIES) {
                if (!(p->flags & IORING_SETUP_CLAMP))
                        return -EINVAL;
                entries = IORING_MAX_ENTRIES;
        }

        /*
         * Use twice as many entries for the CQ ring. It's possible for the
         * application to drive a higher depth than the size of the SQ ring,
         * since the sqes are only used at submission time. This allows for
         * some flexibility in overcommitting a bit. If the application has
         * set IORING_SETUP_CQSIZE, it will have passed in the desired number
         * of CQ ring entries manually.
         */
        p->sq_entries = roundup_pow_of_two(entries);
        if (p->flags & IORING_SETUP_CQSIZE) {
                /*
                 * If IORING_SETUP_CQSIZE is set, we do the same roundup
                 * to a power-of-two, if it isn't already. We do NOT impose
                 * any cq vs sq ring sizing.
                 */
                if (p->cq_entries < p->sq_entries)
                        return -EINVAL;
                if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
                        if (!(p->flags & IORING_SETUP_CLAMP))
                                return -EINVAL;
                        p->cq_entries = IORING_MAX_CQ_ENTRIES;
                }
                p->cq_entries = roundup_pow_of_two(p->cq_entries);
        } else {
                p->cq_entries = 2 * p->sq_entries;
        }

        user = get_uid(current_user());
        account_mem = !capable(CAP_IPC_LOCK);

        if (account_mem) {
                ret = io_account_mem(user,
                                ring_pages(p->sq_entries, p->cq_entries));
                if (ret) {
                        free_uid(user);
                        return ret;
                }
        }

        ctx = io_ring_ctx_alloc(p);
        if (!ctx) {
                if (account_mem)
                        io_unaccount_mem(user, ring_pages(p->sq_entries,
                                                                p->cq_entries));
                free_uid(user);
                return -ENOMEM;
        }
        ctx->compat = in_compat_syscall();
        ctx->account_mem = account_mem;
        ctx->user = user;
        ctx->creds = get_current_cred();

        ret = io_allocate_scq_urings(ctx, p);
        if (ret)
                goto err;

        ret = io_sq_offload_start(ctx, p);
        if (ret)
                goto err;

        memset(&p->sq_off, 0, sizeof(p->sq_off));
        p->sq_off.head = offsetof(struct io_rings, sq.head);
        p->sq_off.tail = offsetof(struct io_rings, sq.tail);
        p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
        p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
        p->sq_off.flags = offsetof(struct io_rings, sq_flags);
        p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
        p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

        memset(&p->cq_off, 0, sizeof(p->cq_off));
        p->cq_off.head = offsetof(struct io_rings, cq.head);
        p->cq_off.tail = offsetof(struct io_rings, cq.tail);
        p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
        p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
        p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
        p->cq_off.cqes = offsetof(struct io_rings, cqes);
        p->cq_off.flags = offsetof(struct io_rings, cq_flags);

        p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
                        IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
                        IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL;

        if (copy_to_user(params, p, sizeof(*p))) {
                ret = -EFAULT;
                goto err;
        }
        /*
         * Install ring fd as the very last thing, so we don't risk someone
         * having closed it before we finish setup
         */
        ret = io_uring_get_fd(ctx);
        if (ret < 0)
                goto err;

        trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
        return ret;
err:
        io_ring_ctx_wait_and_kill(ctx);
        return ret;
}

/*
 * Sets up an aio uring context, and returns the fd. The application asks
 * for a ring size; we return the actual sq/cq ring sizes (among other
 * things) in the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
        struct io_uring_params p;
        int i;

        if (copy_from_user(&p, params, sizeof(p)))
                return -EFAULT;
        for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
                if (p.resv[i])
                        return -EINVAL;
        }

        if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
                        IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
                        IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
                return -EINVAL;

        return io_uring_create(entries, &p, params);
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
                struct io_uring_params __user *, params)
{
        return io_uring_setup(entries, params);
}

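/*
 * Example (illustrative sketch, not part of this file): creating a ring
 * from userspace via the raw syscall and inspecting what the kernel
 * handed back. Error handling omitted.
 *
 *        #include <string.h>
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/io_uring.h>
 *
 *        struct io_uring_params p;
 *        memset(&p, 0, sizeof(p));
 *
 *        int ring_fd = syscall(__NR_io_uring_setup, 64, &p);
 *
 *        // entries is rounded up to a power of two; without
 *        // IORING_SETUP_CQSIZE the CQ ring defaults to twice the SQ
 *        // ring, so p.sq_entries == 64 and p.cq_entries == 128 here.
 */
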
static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
        struct io_uring_probe *p;
        size_t size;
        int i, ret;

        size = struct_size(p, ops, nr_args);
        if (size == SIZE_MAX)
                return -EOVERFLOW;
        p = kzalloc(size, GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        ret = -EFAULT;
        if (copy_from_user(p, arg, size))
                goto out;
        ret = -EINVAL;
        if (memchr_inv(p, 0, size))
                goto out;

        p->last_op = IORING_OP_LAST - 1;
        if (nr_args > IORING_OP_LAST)
                nr_args = IORING_OP_LAST;

        for (i = 0; i < nr_args; i++) {
                p->ops[i].op = i;
                if (!io_op_defs[i].not_supported)
                        p->ops[i].flags = IO_URING_OP_SUPPORTED;
        }
        p->ops_len = i;

        ret = 0;
        if (copy_to_user(arg, p, size))
                ret = -EFAULT;
out:
        kfree(p);
        return ret;
}

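/*
 * Example (illustrative sketch, not part of this file): probing which
 * opcodes the running kernel supports via
 * io_uring_register(IORING_REGISTER_PROBE). Assumes <ring_fd>; error
 * handling omitted.
 *
 *        #include <stdlib.h>
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/io_uring.h>
 *
 *        struct io_uring_probe *probe;
 *
 *        probe = calloc(1, sizeof(*probe) +
 *                       IORING_OP_LAST * sizeof(struct io_uring_probe_op));
 *        syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *                probe, IORING_OP_LAST);
 *
 *        if (probe->ops[IORING_OP_SPLICE].flags & IO_URING_OP_SUPPORTED)
 *                // IORING_OP_SPLICE is available on this kernel
 */
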
static int io_register_personality(struct io_ring_ctx *ctx)
{
        const struct cred *creds = get_current_cred();
        int id;

        id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
                                USHRT_MAX, GFP_KERNEL);
        if (id < 0)
                put_cred(creds);
        return id;
}

static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
        const struct cred *old_creds;

        old_creds = idr_remove(&ctx->personality_idr, id);
        if (old_creds) {
                put_cred(old_creds);
                return 0;
        }

        return -EINVAL;
}

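/*
 * Example (illustrative sketch, not part of this file): registering the
 * current task's credentials as a personality and tagging a later SQE
 * with it. Assumes <ring_fd> and a prepared <sqe>; error handling
 * omitted.
 *
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <linux/io_uring.h>
 *
 *        int id = syscall(__NR_io_uring_register, ring_fd,
 *                         IORING_REGISTER_PERSONALITY, NULL, 0);
 *
 *        sqe->personality = id;  // request runs with the registered creds
 */
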
static bool io_register_op_must_quiesce(int op)
{
        switch (op) {
        case IORING_UNREGISTER_FILES:
        case IORING_REGISTER_FILES_UPDATE:
        case IORING_REGISTER_PROBE:
        case IORING_REGISTER_PERSONALITY:
        case IORING_UNREGISTER_PERSONALITY:
                return false;
        default:
                return true;
        }
}

static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                               void __user *arg, unsigned nr_args)
        __releases(ctx->uring_lock)
        __acquires(ctx->uring_lock)
{
        int ret;

        /*
         * We're inside the ring mutex; if the ref is already dying, then
         * someone else killed the ctx or is already going through
         * io_uring_register().
         */
        if (percpu_ref_is_dying(&ctx->refs))
                return -ENXIO;

        if (io_register_op_must_quiesce(opcode)) {
                percpu_ref_kill(&ctx->refs);

                /*
                 * Drop uring mutex before waiting for references to exit. If
                 * another thread is currently inside io_uring_enter() it might
                 * need to grab the uring_lock to make progress. If we hold it
                 * here across the drain wait, then we can deadlock. It's safe
                 * to drop the mutex here, since no new references will come in
                 * after we've killed the percpu ref.
                 */
                mutex_unlock(&ctx->uring_lock);
                ret = wait_for_completion_interruptible(&ctx->ref_comp);
                mutex_lock(&ctx->uring_lock);
                if (ret) {
                        percpu_ref_resurrect(&ctx->refs);
                        ret = -EINTR;
                        goto out;
                }
        }

        switch (opcode) {
        case IORING_REGISTER_BUFFERS:
                ret = io_sqe_buffer_register(ctx, arg, nr_args);
                break;
        case IORING_UNREGISTER_BUFFERS:
                ret = -EINVAL;
                if (arg || nr_args)
                        break;
                ret = io_sqe_buffer_unregister(ctx);
                break;
        case IORING_REGISTER_FILES:
                ret = io_sqe_files_register(ctx, arg, nr_args);
                break;
        case IORING_UNREGISTER_FILES:
                ret = -EINVAL;
                if (arg || nr_args)
                        break;
                ret = io_sqe_files_unregister(ctx);
                break;
        case IORING_REGISTER_FILES_UPDATE:
                ret = io_sqe_files_update(ctx, arg, nr_args);
                break;
        case IORING_REGISTER_EVENTFD:
        case IORING_REGISTER_EVENTFD_ASYNC:
                ret = -EINVAL;
                if (nr_args != 1)
                        break;
                ret = io_eventfd_register(ctx, arg);
                if (ret)
                        break;
                if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
                        ctx->eventfd_async = 1;
                else
                        ctx->eventfd_async = 0;
                break;
        case IORING_UNREGISTER_EVENTFD:
                ret = -EINVAL;
                if (arg || nr_args)
                        break;
                ret = io_eventfd_unregister(ctx);
                break;
        case IORING_REGISTER_PROBE:
                ret = -EINVAL;
                if (!arg || nr_args > 256)
                        break;
                ret = io_probe(ctx, arg, nr_args);
                break;
        case IORING_REGISTER_PERSONALITY:
                ret = -EINVAL;
                if (arg || nr_args)
                        break;
                ret = io_register_personality(ctx);
                break;
        case IORING_UNREGISTER_PERSONALITY:
                ret = -EINVAL;
                if (arg)
                        break;
                ret = io_unregister_personality(ctx, nr_args);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (io_register_op_must_quiesce(opcode)) {
                /* bring the ctx back to life */
                percpu_ref_reinit(&ctx->refs);
out:
                reinit_completion(&ctx->ref_comp);
        }
        return ret;
}

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
                void __user *, arg, unsigned int, nr_args)
{
        struct io_ring_ctx *ctx;
        long ret = -EBADF;
        struct fd f;

        f = fdget(fd);
        if (!f.file)
                return -EBADF;

        ret = -EOPNOTSUPP;
        if (f.file->f_op != &io_uring_fops)
                goto out_fput;

        ctx = f.file->private_data;

        mutex_lock(&ctx->uring_lock);
        ret = __io_uring_register(ctx, opcode, arg, nr_args);
        mutex_unlock(&ctx->uring_lock);
        trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
                                                        ctx->cq_ev_fd != NULL, ret);
out_fput:
        fdput(f);
        return ret;
}

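/*
 * Example (illustrative sketch, not part of this file): registering a
 * fixed buffer so that IORING_OP_READ_FIXED/WRITE_FIXED can skip
 * per-I/O page pinning. Assumes <ring_fd>; error handling omitted.
 *
 *        #include <stdlib.h>
 *        #include <unistd.h>
 *        #include <sys/syscall.h>
 *        #include <sys/uio.h>
 *        #include <linux/io_uring.h>
 *
 *        struct iovec iov = {
 *                .iov_base = aligned_alloc(4096, 65536),
 *                .iov_len  = 65536,
 *        };
 *
 *        syscall(__NR_io_uring_register, ring_fd,
 *                IORING_REGISTER_BUFFERS, &iov, 1);
 */
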
static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
        BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
        BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
        __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
        BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
        BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
        BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
        BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
        BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
        BUILD_BUG_SQE_ELEM(8,  __u64,  off);
        BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
        BUILD_BUG_SQE_ELEM(16, __u64,  addr);
        BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
        BUILD_BUG_SQE_ELEM(24, __u32,  len);
        BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
        BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
        BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
        BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
        BUILD_BUG_SQE_ELEM(28, __u16,  poll_events);
        BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
        BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
        BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
        BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
        BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
        BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
        BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
        BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
        BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
        BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
        BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
        BUILD_BUG_SQE_ELEM(42, __u16,  personality);
        BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);

        BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
        BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
        req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
        return 0;
};
__initcall(io_uring_init);