io_uring: support buffer selection for OP_READ and OP_RECV
[linux-2.6-block.git] fs/io_uring.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed in
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
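/*
 * Illustrative sketch (added for exposition, not part of the kernel logic):
 * the userspace side of the CQ protocol described above, assuming a
 * liburing-style mmap of the rings. The app_ring type and the
 * cq_head/cq_tail/cq_mask/cqes pointers are hypothetical stand-ins for
 * whatever the application derives from the io_uring_setup() offsets.
 */
#if 0
struct app_ring {
	unsigned *cq_head, *cq_tail, *cq_mask;
	struct io_uring_cqe *cqes;
};

static void app_drain_cq(struct app_ring *r)
{
	/* acquire-load of the tail pairs with the kernel's tail store */
	unsigned tail = smp_load_acquire(r->cq_tail);
	unsigned head = *r->cq_head;

	while (head != tail) {
		struct io_uring_cqe *cqe = &r->cqes[head & *r->cq_mask];

		app_consume_cqe(cqe);	/* app-defined */
		head++;
	}
	/* release-store of head pairs with the head read in io_get_cqring() */
	smp_store_release(r->cq_head, head);
}
#endif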
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};

struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct bio_vec	*bvec;
	unsigned int	nr_bvecs;
};

struct fixed_file_table {
	struct file	**files;
};

struct fixed_file_data {
	struct fixed_file_table	*table;
	struct io_ring_ctx	*ctx;

	struct percpu_ref	refs;
	struct llist_head	put_llist;
	struct work_struct	ref_work;
	struct completion	done;
};

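/*
 * Illustrative sketch: resolving a registered (fixed) file through the
 * two-level table, using the IORING_FILE_TABLE_SHIFT/MASK arithmetic from
 * the defines above (e.g. fd 1000 lands in table 1, slot 488). The helper
 * name is hypothetical; the real lookup is open-coded at the call sites.
 */
#if 0
static struct file *fixed_file_lookup(struct fixed_file_data *data, int fd)
{
	struct fixed_file_table *table;

	table = &data->table[fd >> IORING_FILE_TABLE_SHIFT];
	return table->files[fd & IORING_FILE_TABLE_MASK];
}
#endif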
struct io_buffer {
	struct list_head list;
	__u64 addr;
	__s32 len;
	__u16 bid;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		account_mem: 1;
		unsigned int		cq_overflow_flushed: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		atomic_t		cached_cq_overflow;
		unsigned long		sq_check_overflow;

		struct list_head	defer_list;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;

		wait_queue_head_t	inflight_wait;
		struct io_uring_sqe	*sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct io_rings	*rings;

	/* IO offload */
	struct io_wq		*io_wq;
	struct task_struct	*sqo_thread;	/* if using sq thread polling */
	struct mm_struct	*sqo_mm;
	wait_queue_head_t	sqo_wait;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_file_data	*file_data;
	unsigned		nr_user_files;
	int			ring_fd;
	struct file		*ring_file;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	const struct cred	*creds;

	/* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */
	struct completion	*completions;

	/* if all else fails... */
	struct io_kiocb		*fallback_req;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	struct idr		io_buffer_idr;

	struct idr		personality_idr;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		unsigned		cq_mask;
		atomic_t		cq_timeouts;
		unsigned long		cq_check_overflow;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->poll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	poll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_file;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;
};

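/*
 * Illustrative sketch: how an SQE is located through the sq_array
 * indirection documented above. A head value is masked to index sq_array,
 * which in turn holds the index into the sq_sqes array. The helper name is
 * hypothetical; the real fetch lives in the submission path.
 */
#if 0
static const struct io_uring_sqe *sqe_at(struct io_ring_ctx *ctx,
					 unsigned head)
{
	u32 idx = READ_ONCE(ctx->sq_array[head & ctx->sq_mask]);

	return &ctx->sq_sqes[idx];
}
#endif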
/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file *file;
	union {
		struct wait_queue_head *head;
		u64 addr;
	};
	__poll_t events;
	bool done;
	bool canceled;
	struct wait_queue_entry wait;
};

struct io_close {
	struct file *file;
	struct file *put_file;
	int fd;
};

struct io_timeout_data {
	struct io_kiocb *req;
	struct hrtimer timer;
	struct timespec64 ts;
	enum hrtimer_mode mode;
	u32 seq_offset;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
};

struct io_sync {
	struct file *file;
	loff_t len;
	loff_t off;
	int flags;
	int mode;
};

struct io_cancel {
	struct file *file;
	u64 addr;
};

struct io_timeout {
	struct file *file;
	u64 addr;
	int flags;
	unsigned count;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb kiocb;
	u64 addr;
	u64 len;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct user_msghdr __user *msg;
		void __user *buf;
	};
	int msg_flags;
	int bgid;
	size_t len;
	struct io_buffer *kbuf;
};

struct io_open {
	struct file *file;
	int dfd;
	union {
		unsigned mask;
	};
	struct filename *filename;
	struct statx __user *buffer;
	struct open_how how;
};

struct io_files_update {
	struct file *file;
	u64 arg;
	u32 nr_args;
	u32 offset;
};

struct io_fadvise {
	struct file *file;
	u64 offset;
	u32 len;
	u32 advice;
};

struct io_madvise {
	struct file *file;
	u64 addr;
	u32 len;
	u32 advice;
};

struct io_epoll {
	struct file *file;
	int epfd;
	int op;
	int fd;
	struct epoll_event event;
};

struct io_splice {
	struct file *file_out;
	struct file *file_in;
	loff_t off_out;
	loff_t off_in;
	u64 len;
	unsigned int flags;
};

struct io_provide_buf {
	struct file *file;
	__u64 addr;
	__s32 len;
	__u32 bgid;
	__u16 nbufs;
	__u16 bid;
};

struct io_async_connect {
	struct sockaddr_storage address;
};

struct io_async_msghdr {
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov;
	struct sockaddr __user *uaddr;
	struct msghdr msg;
	struct sockaddr_storage addr;
};

struct io_async_rw {
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov;
	ssize_t nr_segs;
	ssize_t size;
};

struct io_async_ctx {
	union {
		struct io_async_rw rw;
		struct io_async_msghdr msg;
		struct io_async_connect connect;
		struct io_timeout_data timeout;
	};
};

enum {
	REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,

	REQ_F_LINK_NEXT_BIT,
	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_IOPOLL_COMPLETED_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_MUST_PUNT_BIT,
	REQ_F_TIMEOUT_NOSEQ_BIT,
	REQ_F_COMP_LOCKED_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_OVERFLOW_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK = BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),

	/* already grabbed next link */
	REQ_F_LINK_NEXT = BIT(REQ_F_LINK_NEXT_BIT),
	/* fail rest of links */
	REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list */
	REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
	/* polled IO has completed */
	REQ_F_IOPOLL_COMPLETED = BIT(REQ_F_IOPOLL_COMPLETED_BIT),
	/* has linked timeout */
	REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* timeout request */
	REQ_F_TIMEOUT = BIT(REQ_F_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
	/* must be punted even for NONBLOCK */
	REQ_F_MUST_PUNT = BIT(REQ_F_MUST_PUNT_BIT),
	/* no timeout sequence */
	REQ_F_TIMEOUT_NOSEQ = BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
	/* completion under lock */
	REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
	/* in overflow list */
	REQ_F_OVERFLOW = BIT(REQ_F_OVERFLOW_BIT),
	/* already went through poll handler */
	REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
};

struct async_poll {
	struct io_poll_iocb poll;
	struct io_wq_work work;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file *file;
		struct io_rw rw;
		struct io_poll_iocb poll;
		struct io_accept accept;
		struct io_sync sync;
		struct io_cancel cancel;
		struct io_timeout timeout;
		struct io_connect connect;
		struct io_sr_msg sr_msg;
		struct io_open open;
		struct io_close close;
		struct io_files_update files_update;
		struct io_fadvise fadvise;
		struct io_madvise madvise;
		struct io_epoll epoll;
		struct io_splice splice;
		struct io_provide_buf pbuf;
	};

	struct io_async_ctx *io;
	bool needs_fixed_file;
	u8 opcode;

	struct io_ring_ctx *ctx;
	struct list_head list;
	unsigned int flags;
	refcount_t refs;
	struct task_struct *task;
	u64 user_data;
	u32 result;
	u32 sequence;

	struct list_head link_list;

	struct list_head inflight_entry;

	union {
		/*
		 * Only commands that never go async can use the below fields,
		 * obviously. Right now only IORING_OP_POLL_ADD uses them, and
		 * async armed poll handlers for regular commands. The latter
		 * restore the work, if needed.
		 */
		struct {
			struct callback_head task_work;
			struct hlist_node hash_node;
			struct async_poll *apoll;
			int cflags;
		};
		struct io_wq_work work;
	};
};

#define IO_PLUG_THRESHOLD 2
#define IO_IOPOLL_BATCH 8

struct io_submit_state {
	struct blk_plug plug;

	/*
	 * io_kiocb alloc cache
	 */
	void *reqs[IO_IOPOLL_BATCH];
	unsigned int free_reqs;

	/*
	 * File reference cache
	 */
	struct file *file;
	unsigned int fd;
	unsigned int has_refs;
	unsigned int used_refs;
	unsigned int ios_left;
};

struct io_op_def {
	/* needs req->io allocated for deferral/async */
	unsigned async_ctx : 1;
	/* needs current->mm setup, does mm access */
	unsigned needs_mm : 1;
	/* needs req->file assigned */
	unsigned needs_file : 1;
	/* needs req->file assigned IFF fd is >= 0 */
	unsigned fd_non_neg : 1;
	/* hash wq insertion if file is a regular file */
	unsigned hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned not_supported : 1;
	/* needs file table */
	unsigned file_table : 1;
	/* needs ->fs */
	unsigned needs_fs : 1;
	/* set if opcode supports polled "wait" */
	unsigned pollin : 1;
	unsigned pollout : 1;
	/* op supports buffer selection */
	unsigned buffer_select : 1;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.async_ctx = 1,
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
	},
	[IORING_OP_WRITEV] = {
		.async_ctx = 1,
		.needs_mm = 1,
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
	},
	[IORING_OP_FSYNC] = {
		.needs_file = 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file = 1,
	},
	[IORING_OP_SENDMSG] = {
		.async_ctx = 1,
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.needs_fs = 1,
		.pollout = 1,
	},
	[IORING_OP_RECVMSG] = {
		.async_ctx = 1,
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.needs_fs = 1,
		.pollin = 1,
	},
	[IORING_OP_TIMEOUT] = {
		.async_ctx = 1,
		.needs_mm = 1,
	},
	[IORING_OP_TIMEOUT_REMOVE] = {},
	[IORING_OP_ACCEPT] = {
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.file_table = 1,
		.pollin = 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_ctx = 1,
		.needs_mm = 1,
	},
	[IORING_OP_CONNECT] = {
		.async_ctx = 1,
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file = 1,
	},
	[IORING_OP_OPENAT] = {
		.needs_file = 1,
		.fd_non_neg = 1,
		.file_table = 1,
		.needs_fs = 1,
	},
	[IORING_OP_CLOSE] = {
		.needs_file = 1,
		.file_table = 1,
	},
	[IORING_OP_FILES_UPDATE] = {
		.needs_mm = 1,
		.file_table = 1,
	},
	[IORING_OP_STATX] = {
		.needs_mm = 1,
		.needs_file = 1,
		.fd_non_neg = 1,
		.needs_fs = 1,
	},
	[IORING_OP_READ] = {
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
	},
	[IORING_OP_WRITE] = {
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
	},
	[IORING_OP_FADVISE] = {
		.needs_file = 1,
	},
	[IORING_OP_MADVISE] = {
		.needs_mm = 1,
	},
	[IORING_OP_SEND] = {
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
	},
	[IORING_OP_RECV] = {
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
	},
	[IORING_OP_OPENAT2] = {
		.needs_file = 1,
		.fd_non_neg = 1,
		.file_table = 1,
		.needs_fs = 1,
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file = 1,
		.file_table = 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
};

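/*
 * Illustrative sketch: the table above is indexed by req->opcode. For
 * example, only IORING_OP_READ and IORING_OP_RECV set .buffer_select here,
 * so a check like the hypothetical helper below is what gates
 * IOSQE_BUFFER_SELECT; the real checks are open-coded at the call sites.
 */
#if 0
static bool io_op_supports_buffer_select(u8 opcode)
{
	return io_op_defs[opcode].buffer_select;
}
#endif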
static void io_wq_submit_work(struct io_wq_work **workptr);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_files_update *ip,
				 unsigned nr_args);
static int io_grab_files(struct io_kiocb *req);
static void io_ring_file_ref_flush(struct fixed_file_data *data);
static void io_cleanup_req(struct io_kiocb *req);
static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
		       int fd, struct file **out_file, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req,
			   const struct io_uring_sqe *sqe);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->completions[0]);
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
	if (!ctx->fallback_req)
		goto err;

	ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL);
	if (!ctx->completions)
		goto err;

	/*
	 * Use 5 bits less than the max cq entries; that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
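	/*
	 * Worked example (added for illustration): at the maximum cq_entries
	 * of 65536, ilog2 gives 16, so hash_bits becomes 11 and the table
	 * gets 2048 buckets; 65536 / 2048 = 32 entries per bucket when full.
	 */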
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
					GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->cq_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->completions[0]);
	init_completion(&ctx->completions[1]);
	idr_init(&ctx->io_buffer_idr);
	idr_init(&ctx->personality_idr);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->poll_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	init_waitqueue_head(&ctx->inflight_wait);
	spin_lock_init(&ctx->inflight_lock);
	INIT_LIST_HEAD(&ctx->inflight_list);
	return ctx;
err:
	if (ctx->fallback_req)
		kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx->completions);
	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}

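/*
 * A drained request records the sequence it was submitted at. It may only
 * proceed once every request submitted before it has been accounted for,
 * whether by completing (cached_cq_tail), being dropped (cached_sq_dropped),
 * or going to the overflow list (cached_cq_overflow); hence the comparison
 * below. (Comment added for illustration.)
 */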
static inline bool __req_need_defer(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
					+ atomic_read(&ctx->cached_cq_overflow);
}

static inline bool req_need_defer(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN))
		return __req_need_defer(req);

	return false;
}

static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
	if (req && !req_need_defer(req)) {
		list_del_init(&req->list);
		return req;
	}

	return NULL;
}

static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
	if (req) {
		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
			return NULL;
		if (!__req_need_defer(req)) {
			list_del_init(&req->list);
			return req;
		}
	}

	return NULL;
}

static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* order cqe stores with ring update */
	smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);

	if (wq_has_sleeper(&ctx->cq_wait)) {
		wake_up_interruptible(&ctx->cq_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

static inline void io_req_work_grab_env(struct io_kiocb *req,
					const struct io_op_def *def)
{
	if (!req->work.mm && def->needs_mm) {
		mmgrab(current->mm);
		req->work.mm = current->mm;
	}
	if (!req->work.creds)
		req->work.creds = get_current_cred();
	if (!req->work.fs && def->needs_fs) {
		spin_lock(&current->fs->lock);
		if (!current->fs->in_exec) {
			req->work.fs = current->fs;
			req->work.fs->users++;
		} else {
			req->work.flags |= IO_WQ_WORK_CANCEL;
		}
		spin_unlock(&current->fs->lock);
	}
	if (!req->work.task_pid)
		req->work.task_pid = task_pid_vnr(current);
}

static inline void io_req_work_drop_env(struct io_kiocb *req)
{
	if (req->work.mm) {
		mmdrop(req->work.mm);
		req->work.mm = NULL;
	}
	if (req->work.creds) {
		put_cred(req->work.creds);
		req->work.creds = NULL;
	}
	if (req->work.fs) {
		struct fs_struct *fs = req->work.fs;

		spin_lock(&req->work.fs->lock);
		if (--fs->users)
			fs = NULL;
		spin_unlock(&req->work.fs->lock);
		if (fs)
			free_fs_struct(fs);
	}
}

static inline bool io_prep_async_work(struct io_kiocb *req,
				      struct io_kiocb **link)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	bool do_hashed = false;

	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file)
			do_hashed = true;
	} else {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	io_req_work_grab_env(req, def);

	*link = io_prep_linked_timeout(req);
	return do_hashed;
}

static inline void io_queue_async_work(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link;
	bool do_hashed;

	do_hashed = io_prep_async_work(req, &link);

	trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
					req->flags);
	if (!do_hashed) {
		io_wq_enqueue(ctx->io_wq, &req->work);
	} else {
		io_wq_enqueue_hashed(ctx->io_wq, &req->work,
					file_inode(req->file));
	}

	if (link)
		io_queue_linked_timeout(link);
}

static void io_kill_timeout(struct io_kiocb *req)
{
	int ret;

	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
	if (ret != -1) {
		atomic_inc(&req->ctx->cq_timeouts);
		list_del_init(&req->list);
		io_cqring_fill_event(req, 0);
		io_put_req(req);
	}
}

static void io_kill_timeouts(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req, *tmp;

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
		io_kill_timeout(req);
	spin_unlock_irq(&ctx->completion_lock);
}

static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	while ((req = io_get_timeout_req(ctx)) != NULL)
		io_kill_timeout(req);

	__io_commit_cqring(ctx);

	while ((req = io_get_deferred_req(ctx)) != NULL)
		io_queue_async_work(req);
}

static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail;

	tail = ctx->cached_cq_tail;
	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
		return NULL;

	ctx->cached_cq_tail++;
	return &rings->cqes[tail & ctx->cq_mask];
}

static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
	if (!ctx->cq_ev_fd)
		return false;
	if (!ctx->eventfd_async)
		return true;
	return io_wq_current_is_worker();
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	if (waitqueue_active(&ctx->sqo_wait))
		wake_up(&ctx->sqo_wait);
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
}

/* Returns true if there are no backlogged entries after the flush */
static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
	struct io_rings *rings = ctx->rings;
	struct io_uring_cqe *cqe;
	struct io_kiocb *req;
	unsigned long flags;
	LIST_HEAD(list);

	if (!force) {
		if (list_empty_careful(&ctx->cq_overflow_list))
			return true;
		if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
		    rings->cq_ring_entries))
			return false;
	}

	spin_lock_irqsave(&ctx->completion_lock, flags);

	/* if force is set, the ring is going away. always drop after that */
	if (force)
		ctx->cq_overflow_flushed = 1;

	cqe = NULL;
	while (!list_empty(&ctx->cq_overflow_list)) {
		cqe = io_get_cqring(ctx);
		if (!cqe && !force)
			break;

		req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
						list);
		list_move(&req->list, &list);
		req->flags &= ~REQ_F_OVERFLOW;
		if (cqe) {
			WRITE_ONCE(cqe->user_data, req->user_data);
			WRITE_ONCE(cqe->res, req->result);
			WRITE_ONCE(cqe->flags, req->cflags);
		} else {
			WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
		}
	}

	io_commit_cqring(ctx);
	if (cqe) {
		clear_bit(0, &ctx->sq_check_overflow);
		clear_bit(0, &ctx->cq_check_overflow);
	}
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);

	while (!list_empty(&list)) {
		req = list_first_entry(&list, struct io_kiocb, list);
		list_del(&req->list);
		io_put_req(req);
	}

	return cqe != NULL;
}

static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, req->user_data, res);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (likely(cqe)) {
		WRITE_ONCE(cqe->user_data, req->user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
	} else if (ctx->cq_overflow_flushed) {
		WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
	} else {
		if (list_empty(&ctx->cq_overflow_list)) {
			set_bit(0, &ctx->sq_check_overflow);
			set_bit(0, &ctx->cq_check_overflow);
		}
		req->flags |= REQ_F_OVERFLOW;
		refcount_inc(&req->refs);
		req->result = res;
		req->cflags = cflags;
		list_add_tail(&req->list, &ctx->cq_overflow_list);
	}
}

static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
	__io_cqring_fill_event(req, res, 0);
}

static void __io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	__io_cqring_fill_event(req, res, cflags);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
}

static void io_cqring_add_event(struct io_kiocb *req, long res)
{
	__io_cqring_add_event(req, res, 0);
}

static inline bool io_is_fallback_req(struct io_kiocb *req)
{
	return req == (struct io_kiocb *)
			((unsigned long) req->ctx->fallback_req & ~1UL);
}

static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = ctx->fallback_req;
	if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
		return req;

	return NULL;
}

static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
				   struct io_submit_state *state)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct io_kiocb *req;

	if (!state) {
		req = kmem_cache_alloc(req_cachep, gfp);
		if (unlikely(!req))
			goto fallback;
	} else if (!state->free_reqs) {
		size_t sz;
		int ret;

		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);

		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		if (unlikely(ret <= 0)) {
			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!state->reqs[0])
				goto fallback;
			ret = 1;
		}
		state->free_reqs = ret - 1;
		req = state->reqs[ret - 1];
	} else {
		state->free_reqs--;
		req = state->reqs[state->free_reqs];
	}

got_it:
	req->io = NULL;
	req->file = NULL;
	req->ctx = ctx;
	req->flags = 0;
	/* one is dropped after submission, the other at completion */
	refcount_set(&req->refs, 2);
	req->result = 0;
	INIT_IO_WORK(&req->work, io_wq_submit_work);
	return req;
fallback:
	req = io_get_fallback_req(ctx);
	if (req)
		goto got_it;
	percpu_ref_put(&ctx->refs);
	return NULL;
}

static inline void io_put_file(struct io_kiocb *req, struct file *file,
			       bool fixed)
{
	if (fixed)
		percpu_ref_put(&req->ctx->file_data->refs);
	else
		fput(file);
}

static void __io_req_do_free(struct io_kiocb *req)
{
	if (likely(!io_is_fallback_req(req)))
		kmem_cache_free(req_cachep, req);
	else
		clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
}

static void __io_req_aux_free(struct io_kiocb *req)
{
	if (req->flags & REQ_F_NEED_CLEANUP)
		io_cleanup_req(req);

	kfree(req->io);
	if (req->file)
		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));

	io_req_work_drop_env(req);
}

static void __io_free_req(struct io_kiocb *req)
{
	__io_req_aux_free(req);

	if (req->flags & REQ_F_INFLIGHT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		list_del(&req->inflight_entry);
		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
	}

	percpu_ref_put(&req->ctx->refs);
	__io_req_do_free(req);
}

struct req_batch {
	void *reqs[IO_IOPOLL_BATCH];
	int to_free;
	int need_iter;
};

static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
{
	int fixed_refs = rb->to_free;

	if (!rb->to_free)
		return;
	if (rb->need_iter) {
		int i, inflight = 0;
		unsigned long flags;

		fixed_refs = 0;
		for (i = 0; i < rb->to_free; i++) {
			struct io_kiocb *req = rb->reqs[i];

			if (req->flags & REQ_F_FIXED_FILE) {
				req->file = NULL;
				fixed_refs++;
			}
			if (req->flags & REQ_F_INFLIGHT)
				inflight++;
			__io_req_aux_free(req);
		}
		if (!inflight)
			goto do_free;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		for (i = 0; i < rb->to_free; i++) {
			struct io_kiocb *req = rb->reqs[i];

			if (req->flags & REQ_F_INFLIGHT) {
				list_del(&req->inflight_entry);
				if (!--inflight)
					break;
			}
		}
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);

		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
	}
do_free:
	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
	if (fixed_refs)
		percpu_ref_put_many(&ctx->file_data->refs, fixed_refs);
	percpu_ref_put_many(&ctx->refs, rb->to_free);
	rb->to_free = rb->need_iter = 0;
}

static bool io_link_cancel_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
	if (ret != -1) {
		io_cqring_fill_event(req, -ECANCELED);
		io_commit_cqring(ctx);
		req->flags &= ~REQ_F_LINK;
		io_put_req(req);
		return true;
	}

	return false;
}

static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	struct io_ring_ctx *ctx = req->ctx;
	bool wake_ev = false;

	/* Already got next link */
	if (req->flags & REQ_F_LINK_NEXT)
		return;

	/*
	 * The list should never be empty when we are called here. But it
	 * could potentially happen if the chain is messed up, so check to
	 * be on the safe side.
	 */
	while (!list_empty(&req->link_list)) {
		struct io_kiocb *nxt = list_first_entry(&req->link_list,
						struct io_kiocb, link_list);

		if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) &&
			     (nxt->flags & REQ_F_TIMEOUT))) {
			list_del_init(&nxt->link_list);
			wake_ev |= io_link_cancel_timeout(nxt);
			req->flags &= ~REQ_F_LINK_TIMEOUT;
			continue;
		}

		list_del_init(&req->link_list);
		if (!list_empty(&nxt->link_list))
			nxt->flags |= REQ_F_LINK;
		*nxtptr = nxt;
		break;
	}

	req->flags |= REQ_F_LINK_NEXT;
	if (wake_ev)
		io_cqring_ev_posted(ctx);
}

/*
 * Called if REQ_F_LINK is set, and we fail the head request
 */
static void io_fail_links(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);

	while (!list_empty(&req->link_list)) {
		struct io_kiocb *link = list_first_entry(&req->link_list,
						struct io_kiocb, link_list);

		list_del_init(&link->link_list);
		trace_io_uring_fail_link(req, link);

		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
		    link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_link_cancel_timeout(link);
		} else {
			io_cqring_fill_event(link, -ECANCELED);
			__io_double_put_req(link);
		}
		req->flags &= ~REQ_F_LINK_TIMEOUT;
	}

	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);
}

static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
{
	if (likely(!(req->flags & REQ_F_LINK)))
		return;

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (req->flags & REQ_F_FAIL_LINK) {
		io_fail_links(req);
	} else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
			REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		/*
		 * If this is a timeout link, we could be racing with the
		 * timeout timer. Grab the completion lock for this case to
		 * protect against that.
		 */
		spin_lock_irqsave(&ctx->completion_lock, flags);
		io_req_link_next(req, nxt);
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
	} else {
		io_req_link_next(req, nxt);
	}
}

static void io_free_req(struct io_kiocb *req)
{
	struct io_kiocb *nxt = NULL;

	io_req_find_next(req, &nxt);
	__io_free_req(req);

	if (nxt)
		io_queue_async_work(nxt);
}

static void io_link_work_cb(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;
	struct io_kiocb *link = work->data;

	io_queue_linked_timeout(link);
	io_wq_submit_work(workptr);
}

static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
{
	struct io_kiocb *link;

	*workptr = &nxt->work;
	link = io_prep_linked_timeout(nxt);
	if (link) {
		nxt->work.func = io_link_work_cb;
		nxt->work.data = link;
	}
}

/*
 * Drop reference to request, return next in chain (if there is one) if this
 * was the last reference to this request.
 */
__attribute__((nonnull))
static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	if (refcount_dec_and_test(&req->refs)) {
		io_req_find_next(req, nxtptr);
		__io_free_req(req);
	}
}

static void io_put_req(struct io_kiocb *req)
{
	if (refcount_dec_and_test(&req->refs))
		io_free_req(req);
}

static void io_steal_work(struct io_kiocb *req,
			  struct io_wq_work **workptr)
{
	/*
	 * It's in an io-wq worker, so there should always be at least
	 * one reference, which will be dropped in io_put_work() just
	 * after the current handler returns.
	 *
	 * It also means that if the counter dropped to 1, then there are
	 * no asynchronous users left, so it's safe to steal the next work.
	 */
	if (refcount_read(&req->refs) == 1) {
		struct io_kiocb *nxt = NULL;

		io_req_find_next(req, &nxt);
		if (nxt)
			io_wq_assign_next(workptr, nxt);
	}
}

/*
 * Must only be used if we don't need to care about links, usually from
 * within the completion handling itself.
 */
static void __io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		__io_free_req(req);
}

static void io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		io_free_req(req);
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
{
	struct io_rings *rings = ctx->rings;

	if (test_bit(0, &ctx->cq_check_overflow)) {
		/*
		 * noflush == true is from the waitqueue handler, just ensure
		 * we wake up the task, and the next invocation will flush the
		 * entries. We cannot safely do it from here.
		 */
		if (noflush && !list_empty(&ctx->cq_overflow_list))
			return -1U;

		io_cqring_overflow_flush(ctx, false);
	}

	/* See comment at the top of this file */
	smp_rmb();
	return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
{
	if ((req->flags & REQ_F_LINK) || io_is_fallback_req(req))
		return false;

	if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
		rb->need_iter++;

	rb->reqs[rb->to_free++] = req;
	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
		io_free_req_many(req->ctx, rb);
	return true;
}

static int io_put_kbuf(struct io_kiocb *req)
{
	struct io_buffer *kbuf = (struct io_buffer *) req->rw.addr;
	int cflags;

	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
	cflags |= IORING_CQE_F_BUFFER;
	req->rw.addr = 0;
	kfree(kbuf);
	return cflags;
}

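/*
 * Illustrative sketch of the userspace side: recovering the provided-buffer
 * ID that io_put_kbuf() packs into cqe->flags. The helper is hypothetical;
 * an application would check IORING_CQE_F_BUFFER before trusting the ID.
 */
#if 0
static unsigned app_cqe_buffer_id(const struct io_uring_cqe *cqe)
{
	return cqe->flags >> IORING_CQE_BUFFER_SHIFT;
}
#endif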
def596e9
JA
1699/*
1700 * Find and free completed poll iocbs
1701 */
1702static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
1703 struct list_head *done)
1704{
8237e045 1705 struct req_batch rb;
def596e9 1706 struct io_kiocb *req;
def596e9 1707
c6ca97b3 1708 rb.to_free = rb.need_iter = 0;
def596e9 1709 while (!list_empty(done)) {
bcda7baa
JA
1710 int cflags = 0;
1711
def596e9
JA
1712 req = list_first_entry(done, struct io_kiocb, list);
1713 list_del(&req->list);
1714
bcda7baa
JA
1715 if (req->flags & REQ_F_BUFFER_SELECTED)
1716 cflags = io_put_kbuf(req);
1717
1718 __io_cqring_fill_event(req, req->result, cflags);
def596e9
JA
1719 (*nr_events)++;
1720
8237e045
JA
1721 if (refcount_dec_and_test(&req->refs) &&
1722 !io_req_multi_free(&rb, req))
1723 io_free_req(req);
def596e9 1724 }
def596e9 1725
09bb8394 1726 io_commit_cqring(ctx);
8237e045 1727 io_free_req_many(ctx, &rb);
def596e9
JA
1728}
1729
1730static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
1731 long min)
1732{
1733 struct io_kiocb *req, *tmp;
1734 LIST_HEAD(done);
1735 bool spin;
1736 int ret;
1737
1738 /*
1739 * Only spin for completions if we don't have multiple devices hanging
1740 * off our complete list, and we're under the requested amount.
1741 */
1742 spin = !ctx->poll_multi_file && *nr_events < min;
1743
1744 ret = 0;
1745 list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
9adbd45d 1746 struct kiocb *kiocb = &req->rw.kiocb;
def596e9
JA
1747
1748 /*
1749 * Move completed entries to our local list. If we find a
1750 * request that requires polling, break out and complete
1751 * the done list first, if we have entries there.
1752 */
1753 if (req->flags & REQ_F_IOPOLL_COMPLETED) {
1754 list_move_tail(&req->list, &done);
1755 continue;
1756 }
1757 if (!list_empty(&done))
1758 break;
1759
1760 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
1761 if (ret < 0)
1762 break;
1763
1764 if (ret && spin)
1765 spin = false;
1766 ret = 0;
1767 }
1768
1769 if (!list_empty(&done))
1770 io_iopoll_complete(ctx, nr_events, &done);
1771
1772 return ret;
1773}
1774
1775/*
d195a66e 1776 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
def596e9
JA
1777 * non-spinning poll check - we'll still enter the driver poll loop, but only
1778 * as a non-spinning completion check.
1779 */
1780static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
1781 long min)
1782{
08f5439f 1783 while (!list_empty(&ctx->poll_list) && !need_resched()) {
def596e9
JA
1784 int ret;
1785
1786 ret = io_do_iopoll(ctx, nr_events, min);
1787 if (ret < 0)
1788 return ret;
1789 if (!min || *nr_events >= min)
1790 return 0;
1791 }
1792
1793 return 1;
1794}
1795
1796/*
1797 * We can't just wait for polled events to come to us, we have to actively
1798 * find and complete them.
1799 */
1800static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
1801{
1802 if (!(ctx->flags & IORING_SETUP_IOPOLL))
1803 return;
1804
1805 mutex_lock(&ctx->uring_lock);
1806 while (!list_empty(&ctx->poll_list)) {
1807 unsigned int nr_events = 0;
1808
1809 io_iopoll_getevents(ctx, &nr_events, 1);
08f5439f
JA
1810
1811 /*
1812 * Ensure we allow local-to-the-cpu processing to take place,
1813 * in this case we need to ensure that we reap all events.
1814 */
1815 cond_resched();
def596e9
JA
1816 }
1817 mutex_unlock(&ctx->uring_lock);
1818}

static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
			   long min)
{
	int iters = 0, ret = 0;

	/*
	 * We disallow the app entering submit/complete with polling, but we
	 * still need to lock the ring to prevent racing with polled issue
	 * that got punted to a workqueue.
	 */
	mutex_lock(&ctx->uring_lock);
	do {
		int tmin = 0;

		/*
		 * Don't enter poll loop if we already have events pending.
		 * If we do, we can potentially be spinning for commands that
		 * already triggered a CQE (eg in error).
		 */
		if (io_cqring_events(ctx, false))
			break;

		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (!(++iters & 7)) {
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&ctx->uring_lock);
		}

		if (*nr_events < min)
			tmin = min - *nr_events;

		ret = io_iopoll_getevents(ctx, nr_events, tmin);
		if (ret <= 0)
			break;
		ret = 0;
	} while (min && !*nr_events && !need_resched());

	mutex_unlock(&ctx->uring_lock);
	return ret;
}
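
/*
 * For example: with min == 1 and the only pending request punted to io-wq,
 * every 8th pass through the loop above drops and re-takes uring_lock,
 * giving the worker a window to finish its polled issue and add the
 * request to ctx->poll_list.
 */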

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct inode *inode = file_inode(req->file);

		__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
	}
	file_end_write(req->file);
}

static inline void req_set_fail_links(struct io_kiocb *req)
{
	if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
		req->flags |= REQ_F_FAIL_LINK;
}

static void io_complete_rw_common(struct kiocb *kiocb, long res)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
	int cflags = 0;

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if (res != req->result)
		req_set_fail_links(req);
	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_kbuf(req);
	__io_cqring_add_event(req, res, cflags);
}

static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	io_complete_rw_common(kiocb, res);
	io_put_req(req);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if (res != req->result)
		req_set_fail_links(req);
	req->result = res;
	if (res != -EAGAIN)
		req->flags |= REQ_F_IOPOLL_COMPLETED;
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from an io_iopoll_getevents() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (list_empty(&ctx->poll_list)) {
		ctx->poll_multi_file = false;
	} else if (!ctx->poll_multi_file) {
		struct io_kiocb *list_req;

		list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
						list);
		if (list_req->file != req->file)
			ctx->poll_multi_file = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (req->flags & REQ_F_IOPOLL_COMPLETED)
		list_add(&req->list, &ctx->poll_list);
	else
		list_add_tail(&req->list, &ctx->poll_list);

	if ((ctx->flags & IORING_SETUP_SQPOLL) &&
	    wq_has_sleeper(&ctx->sqo_wait))
		wake_up(&ctx->sqo_wait);
}

static void io_file_put(struct io_submit_state *state)
{
	if (state->file) {
		int diff = state->has_refs - state->used_refs;

		if (diff)
			fput_many(state->file, diff);
		state->file = NULL;
	}
}

/*
 * Get as many references to a file as we have IOs left in this submission,
 * assuming most submissions are for one file, or at least that each file
 * has more than one submission.
 */
static struct file *__io_file_get(struct io_submit_state *state, int fd)
{
	if (!state)
		return fget(fd);

	if (state->file) {
		if (state->fd == fd) {
			state->used_refs++;
			state->ios_left--;
			return state->file;
		}
		io_file_put(state);
	}
	state->file = fget_many(fd, state->ios_left);
	if (!state->file)
		return NULL;

	state->fd = fd;
	state->has_refs = state->ios_left;
	state->used_refs = 1;
	state->ios_left--;
	return state->file;
}
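
/*
 * For example: a submission batch of 6 SQEs that all target the same fd
 * takes 6 references up front with a single fget_many(); each later lookup
 * just bumps used_refs, and io_file_put() returns the unused
 * has_refs - used_refs surplus if the batch switches to another file.
 */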

/*
 * If we tracked the file through the SCM inflight mechanism, we could support
 * any file. For now, just ensure that anything potentially problematic is done
 * inline.
 */
static bool io_file_supports_async(struct file *file)
{
	umode_t mode = file_inode(file)->i_mode;

	if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode))
		return true;
	if (S_ISREG(mode) && file->f_op != &io_uring_fops)
		return true;

	return false;
}

static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      bool force_nonblock)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct kiocb *kiocb = &req->rw.kiocb;
	unsigned ioprio;
	int ret;

	if (S_ISREG(file_inode(req->file)->i_mode))
		req->flags |= REQ_F_ISREG;

	kiocb->ki_pos = READ_ONCE(sqe->off);
	if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		kiocb->ki_pos = req->file->f_pos;
	}
	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
	if (unlikely(ret))
		return ret;

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		kiocb->ki_ioprio = ioprio;
	} else
		kiocb->ki_ioprio = get_current_ioprio();

	/* don't allow async punt if RWF_NOWAIT was requested */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    (req->file->f_flags & O_NONBLOCK))
		req->flags |= REQ_F_NOWAIT;

	if (force_nonblock)
		kiocb->ki_flags |= IOCB_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
		    !kiocb->ki_filp->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->result = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	req->rw.addr = READ_ONCE(sqe->addr);
	req->rw.len = READ_ONCE(sqe->len);
	/* we own ->private, reuse it for the buffer index / buffer ID */
	req->rw.kiocb.private = (void *) (unsigned long)
				READ_ONCE(sqe->buf_index);
	return 0;
}
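
/*
 * Note: sqe->buf_index is overloaded - for READ_FIXED/WRITE_FIXED it
 * selects a registered buffer slot, while for buffer-select requests
 * (REQ_F_BUFFER_SELECT) it carries the buffer group ID. io_import_fixed()
 * and io_import_iovec() below read it back out of kiocb->private
 * accordingly.
 */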

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		/* fall through */
	default:
		kiocb->ki_complete(kiocb, ret, 0);
	}
}

static void kiocb_done(struct kiocb *kiocb, ssize_t ret)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	if (req->flags & REQ_F_CUR_POS)
		req->file->f_pos = kiocb->ki_pos;
	if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
		io_complete_rw(kiocb, ret, 0);
	else
		io_rw_done(kiocb, ret);
}

static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
			       struct iov_iter *iter)
{
	struct io_ring_ctx *ctx = req->ctx;
	size_t len = req->rw.len;
	struct io_mapped_ubuf *imu;
	unsigned index, buf_index;
	size_t offset;
	u64 buf_addr;

	/* attempt to use fixed buffers without having provided iovecs */
	if (unlikely(!ctx->user_bufs))
		return -EFAULT;

	buf_index = (unsigned long) req->rw.kiocb.private;
	if (unlikely(buf_index >= ctx->nr_user_bufs))
		return -EFAULT;

	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
	imu = &ctx->user_bufs[index];
	buf_addr = req->rw.addr;

	/* overflow */
	if (buf_addr + len < buf_addr)
		return -EFAULT;
	/* not inside the mapped region */
	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
		return -EFAULT;

	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return len;
}
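
/*
 * Worked example for the fast-forward above, assuming 4K pages: with
 * imu->ubuf = 0x10000 and buf_addr = 0x13100, offset starts at 0x3100
 * (12544). The first (page-sized) bvec is skipped, leaving offset = 8448,
 * so seg_skip = 1 + (8448 >> PAGE_SHIFT) = 3 and iov_offset =
 * 8448 & ~PAGE_MASK = 256: the iterator resumes 256 bytes into the fourth
 * bvec without walking the segments in between.
 */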

static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
{
	if (needs_lock)
		mutex_unlock(&ctx->uring_lock);
}

static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (needs_lock)
		mutex_lock(&ctx->uring_lock);
}

static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
					  int bgid, struct io_buffer *kbuf,
					  bool needs_lock)
{
	struct io_buffer *head;

	if (req->flags & REQ_F_BUFFER_SELECTED)
		return kbuf;

	io_ring_submit_lock(req->ctx, needs_lock);

	lockdep_assert_held(&req->ctx->uring_lock);

	head = idr_find(&req->ctx->io_buffer_idr, bgid);
	if (head) {
		if (!list_empty(&head->list)) {
			kbuf = list_last_entry(&head->list, struct io_buffer,
						list);
			list_del(&kbuf->list);
		} else {
			kbuf = head;
			idr_remove(&req->ctx->io_buffer_idr, bgid);
		}
		if (*len > kbuf->len)
			*len = kbuf->len;
	} else {
		kbuf = ERR_PTR(-ENOBUFS);
	}

	io_ring_submit_unlock(req->ctx, needs_lock);

	return kbuf;
}
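
/*
 * Userspace view of the above (a rough sketch, not part of this file;
 * field names follow the uapi of this series, error handling elided):
 * buffers are registered with IORING_OP_PROVIDE_BUFFERS, then consumed
 * by a read/recv that asks the kernel to pick one.
 *
 *	sqe->opcode	= IORING_OP_PROVIDE_BUFFERS;
 *	sqe->fd		= 8;		// number of buffers
 *	sqe->addr	= (u64) base;	// start of the buffer area
 *	sqe->len	= 4096;		// length of each buffer
 *	sqe->buf_group	= 1;		// group ID (bgid)
 *	sqe->off	= 0;		// first buffer ID (bid)
 *
 *	sqe->opcode	= IORING_OP_READ;
 *	sqe->flags	|= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group	= 1;		// select from this group
 *	sqe->len	= 4096;		// capped to the picked buffer
 */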

static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
			       struct iovec **iovec, struct iov_iter *iter,
			       bool needs_lock)
{
	void __user *buf = u64_to_user_ptr(req->rw.addr);
	size_t sqe_len = req->rw.len;
	u8 opcode;

	opcode = req->opcode;
	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		*iovec = NULL;
		return io_import_fixed(req, rw, iter);
	}

	/* buffer index only valid with fixed read/write, or buffer select */
	if (req->rw.kiocb.private && !(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
		ssize_t ret;

		if (req->flags & REQ_F_BUFFER_SELECT) {
			struct io_buffer *kbuf = (struct io_buffer *) req->rw.addr;
			int bgid;

			bgid = (int) (unsigned long) req->rw.kiocb.private;
			kbuf = io_buffer_select(req, &sqe_len, bgid, kbuf,
						needs_lock);
			if (IS_ERR(kbuf)) {
				*iovec = NULL;
				return PTR_ERR(kbuf);
			}
			req->rw.addr = (u64) kbuf;
			req->flags |= REQ_F_BUFFER_SELECTED;
			buf = u64_to_user_ptr(kbuf->addr);
		}

		ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
		*iovec = NULL;
		return ret < 0 ? ret : sqe_len;
	}

	if (req->io) {
		struct io_async_rw *iorw = &req->io->rw;

		*iovec = iorw->iov;
		iov_iter_init(iter, rw, *iovec, iorw->nr_segs, iorw->size);
		if (iorw->iov == iorw->fast_iov)
			*iovec = NULL;
		return iorw->size;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
						iovec, iter);
#endif

	return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
}
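
/*
 * Return convention: on success io_import_iovec() returns the total byte
 * count of the import. io_read()/io_write() stash that in req->result, so
 * a completion shorter than the import fails the remainder of a request
 * link via req_set_fail_links().
 */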

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
			    struct iov_iter *iter)
{
	ssize_t ret = 0;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if (kiocb->ki_flags & IOCB_NOWAIT)
		return -EAGAIN;

	while (iov_iter_count(iter)) {
		struct iovec iovec;
		ssize_t nr;

		if (!iov_iter_is_bvec(iter)) {
			iovec = iov_iter_iovec(iter);
		} else {
			/* fixed buffers import bvec */
			iovec.iov_base = kmap(iter->bvec->bv_page)
						+ iter->iov_offset;
			iovec.iov_len = min(iter->count,
					iter->bvec->bv_len - iter->iov_offset);
		}

		if (rw == READ) {
			nr = file->f_op->read(file, iovec.iov_base,
					      iovec.iov_len, &kiocb->ki_pos);
		} else {
			nr = file->f_op->write(file, iovec.iov_base,
					       iovec.iov_len, &kiocb->ki_pos);
		}

		if (iov_iter_is_bvec(iter))
			kunmap(iter->bvec->bv_page);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != iovec.iov_len)
			break;
		iov_iter_advance(iter, nr);
	}

	return ret;
}
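
/*
 * For example: a character device that only implements ->read() still
 * works through the loop above - each iovec segment becomes one ->read()
 * call, and a short read (nr != iov_len) ends the loop with the partial
 * byte total as the result.
 */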

static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
			  struct iovec *iovec, struct iovec *fast_iov,
			  struct iov_iter *iter)
{
	req->io->rw.nr_segs = iter->nr_segs;
	req->io->rw.size = io_size;
	req->io->rw.iov = iovec;
	if (!req->io->rw.iov) {
		req->io->rw.iov = req->io->rw.fast_iov;
		memcpy(req->io->rw.iov, fast_iov,
			sizeof(struct iovec) * iter->nr_segs);
	} else {
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}

static int io_alloc_async_ctx(struct io_kiocb *req)
{
	if (!io_op_defs[req->opcode].async_ctx)
		return 0;
	req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
	return req->io == NULL;
}

static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
			     struct iovec *iovec, struct iovec *fast_iov,
			     struct iov_iter *iter)
{
	if (!io_op_defs[req->opcode].async_ctx)
		return 0;
	if (!req->io) {
		if (io_alloc_async_ctx(req))
			return -ENOMEM;

		io_req_map_rw(req, io_size, iovec, fast_iov, iter);
	}
	return 0;
}
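
/*
 * Why the copy matters: once the submission syscall returns, the
 * application may reuse the iovec array it passed in. A request punted
 * with -EAGAIN therefore snapshots the imported iovecs into req->io->rw
 * here, and the async retry replays from that copy instead of touching
 * user memory again.
 */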

static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			bool force_nonblock)
{
	struct io_async_ctx *io;
	struct iov_iter iter;
	ssize_t ret;

	ret = io_prep_rw(req, sqe, force_nonblock);
	if (ret)
		return ret;

	if (unlikely(!(req->file->f_mode & FMODE_READ)))
		return -EBADF;

	/* either don't need iovec imported or already have it */
	if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	io = req->io;
	io->rw.iov = io->rw.fast_iov;
	req->io = NULL;
	ret = io_import_iovec(READ, req, &io->rw.iov, &iter, !force_nonblock);
	req->io = io;
	if (ret < 0)
		return ret;

	io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
	return 0;
}

static int io_read(struct io_kiocb *req, bool force_nonblock)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter iter;
	size_t iov_count;
	ssize_t io_size, ret;

	ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
	if (ret < 0)
		return ret;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;

	req->result = 0;
	io_size = ret;
	if (req->flags & REQ_F_LINK)
		req->result = io_size;

	/*
	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
	 * we know to async punt it even if it was opened O_NONBLOCK
	 */
	if (force_nonblock && !io_file_supports_async(req->file))
		goto copy_iov;

	iov_count = iov_iter_count(&iter);
	ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
	if (!ret) {
		ssize_t ret2;

		if (req->file->f_op->read_iter)
			ret2 = call_read_iter(req->file, kiocb, &iter);
		else
			ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);

		/* Catch -EAGAIN return for forced non-blocking submission */
		if (!force_nonblock || ret2 != -EAGAIN) {
			kiocb_done(kiocb, ret2);
		} else {
copy_iov:
			ret = io_setup_async_rw(req, io_size, iovec,
						inline_vecs, &iter);
			if (ret)
				goto out_free;
			/* any defer here is final, must blocking retry */
			if (!(req->flags & REQ_F_NOWAIT))
				req->flags |= REQ_F_MUST_PUNT;
			return -EAGAIN;
		}
	}
out_free:
	kfree(iovec);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	return ret;
}

static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			 bool force_nonblock)
{
	struct io_async_ctx *io;
	struct iov_iter iter;
	ssize_t ret;

	ret = io_prep_rw(req, sqe, force_nonblock);
	if (ret)
		return ret;

	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
		return -EBADF;

	/* either don't need iovec imported or already have it */
	if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	io = req->io;
	io->rw.iov = io->rw.fast_iov;
	req->io = NULL;
	ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter, !force_nonblock);
	req->io = io;
	if (ret < 0)
		return ret;

	io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
	return 0;
}

static int io_write(struct io_kiocb *req, bool force_nonblock)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter iter;
	size_t iov_count;
	ssize_t ret, io_size;

	ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
	if (ret < 0)
		return ret;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;

	req->result = 0;
	io_size = ret;
	if (req->flags & REQ_F_LINK)
		req->result = io_size;

	/*
	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
	 * we know to async punt it even if it was opened O_NONBLOCK
	 */
	if (force_nonblock && !io_file_supports_async(req->file))
		goto copy_iov;

	/* file path doesn't support NOWAIT for non-direct_IO */
	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
	    (req->flags & REQ_F_ISREG))
		goto copy_iov;

	iov_count = iov_iter_count(&iter);
	ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
	if (!ret) {
		ssize_t ret2;

		/*
		 * Open-code file_start_write here to grab freeze protection,
		 * which will be released by another thread in
		 * io_complete_rw(). Fool lockdep by telling it the lock got
		 * released so that it doesn't complain about the held lock when
		 * we return to userspace.
		 */
		if (req->flags & REQ_F_ISREG) {
			__sb_start_write(file_inode(req->file)->i_sb,
						SB_FREEZE_WRITE, true);
			__sb_writers_release(file_inode(req->file)->i_sb,
						SB_FREEZE_WRITE);
		}
		kiocb->ki_flags |= IOCB_WRITE;

		if (req->file->f_op->write_iter)
			ret2 = call_write_iter(req->file, kiocb, &iter);
		else
			ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);

		/*
		 * Raw bdev writes will -EOPNOTSUPP for IOCB_NOWAIT. Just
		 * retry them without IOCB_NOWAIT.
		 */
		if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
			ret2 = -EAGAIN;
		if (!force_nonblock || ret2 != -EAGAIN) {
			kiocb_done(kiocb, ret2);
		} else {
copy_iov:
			ret = io_setup_async_rw(req, io_size, iovec,
						inline_vecs, &iter);
			if (ret)
				goto out_free;
			/* any defer here is final, must blocking retry */
			req->flags |= REQ_F_MUST_PUNT;
			return -EAGAIN;
		}
	}
out_free:
	req->flags &= ~REQ_F_NEED_CLEANUP;
	kfree(iovec);
	return ret;
}

static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_splice *sp = &req->splice;
	unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
	int ret;

	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	sp->file_in = NULL;
	sp->off_in = READ_ONCE(sqe->splice_off_in);
	sp->off_out = READ_ONCE(sqe->off);
	sp->len = READ_ONCE(sqe->len);
	sp->flags = READ_ONCE(sqe->splice_flags);

	if (unlikely(sp->flags & ~valid_flags))
		return -EINVAL;

	ret = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), &sp->file_in,
			  (sp->flags & SPLICE_F_FD_IN_FIXED));
	if (ret)
		return ret;
	req->flags |= REQ_F_NEED_CLEANUP;

	if (!S_ISREG(file_inode(sp->file_in)->i_mode))
		req->work.flags |= IO_WQ_WORK_UNBOUND;

	return 0;
}

static bool io_splice_punt(struct file *file)
{
	if (get_pipe_info(file))
		return false;
	if (!io_file_supports_async(file))
		return true;
	/* O_NONBLOCK lives in f_flags, not f_mode */
	return !(file->f_flags & O_NONBLOCK);
}

static int io_splice(struct io_kiocb *req, bool force_nonblock)
{
	struct io_splice *sp = &req->splice;
	struct file *in = sp->file_in;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	loff_t *poff_in, *poff_out;
	long ret;

	if (force_nonblock) {
		if (io_splice_punt(in) || io_splice_punt(out))
			return -EAGAIN;
		flags |= SPLICE_F_NONBLOCK;
	}

	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
	ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;

	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
	req->flags &= ~REQ_F_NEED_CLEANUP;

	io_cqring_add_event(req, ret);
	if (ret != sp->len)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
}

/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	io_cqring_add_event(req, 0);
	io_put_req(req);
	return 0;
}

static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	return 0;
}

static bool io_req_cancelled(struct io_kiocb *req)
{
	if (req->work.flags & IO_WQ_WORK_CANCEL) {
		req_set_fail_links(req);
		io_cqring_add_event(req, -ECANCELED);
		io_put_req(req);
		return true;
	}

	return false;
}

static void __io_fsync(struct io_kiocb *req)
{
	loff_t end = req->sync.off + req->sync.len;
	int ret;

	ret = vfs_fsync_range(req->file, req->sync.off,
				end > 0 ? end : LLONG_MAX,
				req->sync.flags & IORING_FSYNC_DATASYNC);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
}

static void io_fsync_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);

	if (io_req_cancelled(req))
		return;
	__io_fsync(req);
	io_steal_work(req, workptr);
}

static int io_fsync(struct io_kiocb *req, bool force_nonblock)
{
	/* fsync always requires a blocking context */
	if (force_nonblock) {
		req->work.func = io_fsync_finish;
		return -EAGAIN;
	}
	__io_fsync(req);
	return 0;
}

static void __io_fallocate(struct io_kiocb *req)
{
	int ret;

	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
				req->sync.len);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
}

static void io_fallocate_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);

	if (io_req_cancelled(req))
		return;
	__io_fallocate(req);
	io_steal_work(req, workptr);
}

static int io_fallocate_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->addr);
	req->sync.mode = READ_ONCE(sqe->len);
	return 0;
}

static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
{
	/* fallocate always requires a blocking context */
	if (force_nonblock) {
		req->work.func = io_fallocate_finish;
		return -EAGAIN;
	}

	__io_fallocate(req);
	return 0;
}

static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	int ret;

	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (sqe->flags & IOSQE_FIXED_FILE)
		return -EBADF;
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	req->open.dfd = READ_ONCE(sqe->fd);
	req->open.how.mode = READ_ONCE(sqe->len);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.how.flags = READ_ONCE(sqe->open_flags);

	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct open_how __user *how;
	const char __user *fname;
	size_t len;
	int ret;

	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (sqe->flags & IOSQE_FIXED_FILE)
		return -EBADF;
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	req->open.dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);

	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
					len);
	if (ret)
		return ret;

	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
		req->open.how.flags |= O_LARGEFILE;

	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_openat2(struct io_kiocb *req, bool force_nonblock)
{
	struct open_flags op;
	struct file *file;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	ret = build_open_flags(&req->open.how, &op);
	if (ret)
		goto err;

	ret = get_unused_fd_flags(req->open.how.flags);
	if (ret < 0)
		goto err;

	file = do_filp_open(req->open.dfd, req->open.filename, &op);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
	} else {
		fsnotify_open(file);
		fd_install(ret, file);
	}
err:
	putname(req->open.filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}

static int io_openat(struct io_kiocb *req, bool force_nonblock)
{
	req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
	return io_openat2(req, force_nonblock);
}

static int io_provide_buffers_prep(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (!access_ok(u64_to_user_ptr(p->addr), p->len))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	p->bid = tmp;
	return 0;
}

static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf)
			break;

		buf->addr = addr;
		buf->len = pbuf->len;
		buf->bid = bid;
		addr += pbuf->len;
		bid++;
		if (!*head) {
			INIT_LIST_HEAD(&buf->list);
			*head = buf;
		} else {
			list_add_tail(&buf->list, &(*head)->list);
		}
	}

	return i ? i : -ENOMEM;
}

static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head, *list;
	int ret = 0;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	list = head = idr_find(&ctx->io_buffer_idr, p->bgid);

	ret = io_add_buffers(p, &head);
	if (ret < 0)
		goto out;

	if (!list) {
		ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
					GFP_KERNEL);
		if (ret < 0) {
			while (!list_empty(&head->list)) {
				struct io_buffer *buf;

				buf = list_first_entry(&head->list,
							struct io_buffer, list);
				list_del(&buf->list);
				kfree(buf);
			}
			kfree(head);
			goto out;
		}
	}
out:
	io_ring_submit_unlock(ctx, !force_nonblock);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}
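
/*
 * Result convention: io_add_buffers() stops at the first failed
 * allocation, so the CQE res is the number of buffers actually added -
 * e.g. a request for 8 buffers that manages 3 allocations completes with
 * res == 3; only failing the very first one yields -ENOMEM.
 */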

static int io_epoll_ctl_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_EPOLL)
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;

	req->epoll.epfd = READ_ONCE(sqe->fd);
	req->epoll.op = READ_ONCE(sqe->len);
	req->epoll.fd = READ_ONCE(sqe->off);

	if (ep_op_has_event(req->epoll.op)) {
		struct epoll_event __user *ev;

		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
			return -EFAULT;
	}

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_EPOLL)
	struct io_epoll *ie = &req->epoll;
	int ret;

	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;

	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	if (sqe->ioprio || sqe->buf_index || sqe->off)
		return -EINVAL;

	req->madvise.addr = READ_ONCE(sqe->addr);
	req->madvise.len = READ_ONCE(sqe->len);
	req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	struct io_madvise *ma = &req->madvise;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	ret = do_madvise(ma->addr, ma->len, ma->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->addr)
		return -EINVAL;

	req->fadvise.offset = READ_ONCE(sqe->off);
	req->fadvise.len = READ_ONCE(sqe->len);
	req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
}

static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
{
	struct io_fadvise *fa = &req->fadvise;
	int ret;

	if (force_nonblock) {
		switch (fa->advice) {
		case POSIX_FADV_NORMAL:
		case POSIX_FADV_RANDOM:
		case POSIX_FADV_SEQUENTIAL:
			break;
		default:
			return -EAGAIN;
		}
	}

	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}
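
/*
 * Rationale for the force_nonblock filter above: NORMAL, RANDOM and
 * SEQUENTIAL only adjust readahead hints and don't block, so they are
 * safe to run inline. The remaining advice values (e.g.
 * POSIX_FADV_DONTNEED) may touch the page cache and block, hence the punt
 * to a blocking context.
 */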

static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	unsigned lookup_flags;
	int ret;

	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (sqe->flags & IOSQE_FIXED_FILE)
		return -EBADF;
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	req->open.dfd = READ_ONCE(sqe->fd);
	req->open.mask = READ_ONCE(sqe->len);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	req->open.how.flags = READ_ONCE(sqe->statx_flags);

	if (vfs_stat_set_lookup_flags(&lookup_flags, req->open.how.flags))
		return -EINVAL;

	req->open.filename = getname_flags(fname, lookup_flags, NULL);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_statx(struct io_kiocb *req, bool force_nonblock)
{
	struct io_open *ctx = &req->open;
	unsigned lookup_flags;
	struct path path;
	struct kstat stat;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
		return -EINVAL;

retry:
	/* filename_lookup() drops it, keep a reference */
	ctx->filename->refcnt++;

	ret = filename_lookup(ctx->dfd, ctx->filename, lookup_flags, &path,
				NULL);
	if (ret)
		goto err;

	ret = vfs_getattr(&path, &stat, ctx->mask, ctx->how.flags);
	path_put(&path);
	if (retry_estale(ret, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	if (!ret)
		ret = cp_statx(&stat, ctx->buffer);
err:
	putname(ctx->filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}

static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	/*
	 * If we queue this for async, it must not be cancellable. That would
	 * leave the 'file' in an indeterminate state.
	 */
	req->work.flags |= IO_WQ_WORK_NO_CANCEL;

	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
	    sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (sqe->flags & IOSQE_FIXED_FILE)
		return -EBADF;

	req->close.fd = READ_ONCE(sqe->fd);
	if (req->file->f_op == &io_uring_fops ||
	    req->close.fd == req->ctx->ring_fd)
		return -EBADF;

	return 0;
}

/* only called when __close_fd_get_file() is done */
static void __io_close_finish(struct io_kiocb *req)
{
	int ret;

	ret = filp_close(req->close.put_file, req->work.files);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	fput(req->close.put_file);
	io_put_req(req);
}

static void io_close_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);

	/* not cancellable, don't do io_req_cancelled() */
	__io_close_finish(req);
	io_steal_work(req, workptr);
}

static int io_close(struct io_kiocb *req, bool force_nonblock)
{
	int ret;

	req->close.put_file = NULL;
	ret = __close_fd_get_file(req->close.fd, &req->close.put_file);
	if (ret < 0)
		return ret;

	/* if the file has a flush method, be safe and punt to async */
	if (req->close.put_file->f_op->flush && force_nonblock) {
		/* submission ref will be dropped, take it for async */
		refcount_inc(&req->refs);

		req->work.func = io_close_finish;
		/*
		 * Do the manual async queue here to avoid grabbing files - we
		 * don't need the files, and grabbing them would make
		 * io_close_finish() close the file again and post a double CQE
		 * entry for this request.
		 */
		io_queue_async_work(req);
		return 0;
	}

	/*
	 * No ->flush(), safely close from here and just punt the
	 * fput() to async context.
	 */
	__io_close_finish(req);
	return 0;
}

static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	req->sync.flags = READ_ONCE(sqe->sync_range_flags);
	return 0;
}

static void __io_sync_file_range(struct io_kiocb *req)
{
	int ret;

	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
				req->sync.flags);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
}

static void io_sync_file_range_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);

	if (io_req_cancelled(req))
		return;
	__io_sync_file_range(req);
	io_put_req(req); /* put submission ref */
}

static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
{
	/* sync_file_range always requires a blocking context */
	if (force_nonblock) {
		req->work.func = io_sync_file_range_finish;
		return -EAGAIN;
	}

	__io_sync_file_range(req);
	return 0;
}

static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg)
{
	if (req->io)
		return -EAGAIN;
	if (io_alloc_async_ctx(req)) {
		if (kmsg->iov != kmsg->fast_iov)
			kfree(kmsg->iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(&req->io->msg, kmsg, sizeof(*kmsg));
	return -EAGAIN;
}

static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	struct io_sr_msg *sr = &req->sr_msg;
	struct io_async_ctx *io = req->io;
	int ret;

	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif

	if (!io || req->opcode == IORING_OP_SEND)
		return 0;
	/* iovec is already imported */
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	io->msg.iov = io->msg.fast_iov;
	ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
					&io->msg.iov);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
	struct io_async_msghdr *kmsg = NULL;
	struct socket *sock;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct io_async_ctx io;
		unsigned flags;

		if (req->io) {
			kmsg = &req->io->msg;
			kmsg->msg.msg_name = &req->io->msg.addr;
			/* if iov is set, it's allocated already */
			if (!kmsg->iov)
				kmsg->iov = kmsg->fast_iov;
			kmsg->msg.msg_iter.iov = kmsg->iov;
		} else {
			struct io_sr_msg *sr = &req->sr_msg;

			kmsg = &io.msg;
			kmsg->msg.msg_name = &io.msg.addr;

			io.msg.iov = io.msg.fast_iov;
			ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
						  sr->msg_flags, &io.msg.iov);
			if (ret)
				return ret;
		}

		flags = req->sr_msg.msg_flags;
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

		ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
		if (force_nonblock && ret == -EAGAIN)
			return io_setup_async_msg(req, kmsg);
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	if (kmsg && kmsg->iov != kmsg->fast_iov)
		kfree(kmsg->iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_cqring_add_event(req, ret);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_send(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
	struct socket *sock;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct io_sr_msg *sr = &req->sr_msg;
		struct msghdr msg;
		struct iovec iov;
		unsigned flags;

		ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
					  &msg.msg_iter);
		if (ret)
			return ret;

		msg.msg_name = NULL;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_namelen = 0;

		flags = req->sr_msg.msg_flags;
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

		msg.msg_flags = flags;
		ret = sock_sendmsg(sock, &msg);
		if (force_nonblock && ret == -EAGAIN)
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	io_cqring_add_event(req, ret);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
					       int *cflags, bool needs_lock)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct io_buffer *kbuf;

	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return NULL;

	kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
	if (IS_ERR(kbuf))
		return kbuf;

	sr->kbuf = kbuf;
	req->flags |= REQ_F_BUFFER_SELECTED;

	*cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
	*cflags |= IORING_CQE_F_BUFFER;
	return kbuf;
}

static int io_recvmsg_prep(struct io_kiocb *req,
			   const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	struct io_sr_msg *sr = &req->sr_msg;
	struct io_async_ctx *io = req->io;
	int ret;

	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->bgid = READ_ONCE(sqe->buf_group);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif

	if (!io || req->opcode == IORING_OP_RECV)
		return 0;
	/* iovec is already imported */
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	io->msg.iov = io->msg.fast_iov;
	ret = recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
					&io->msg.uaddr, &io->msg.iov);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
	struct io_async_msghdr *kmsg = NULL;
	struct socket *sock;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct io_async_ctx io;
		unsigned flags;

		if (req->io) {
			kmsg = &req->io->msg;
			kmsg->msg.msg_name = &req->io->msg.addr;
			/* if iov is set, it's allocated already */
			if (!kmsg->iov)
				kmsg->iov = kmsg->fast_iov;
			kmsg->msg.msg_iter.iov = kmsg->iov;
		} else {
			struct io_sr_msg *sr = &req->sr_msg;

			kmsg = &io.msg;
			kmsg->msg.msg_name = &io.msg.addr;

			io.msg.iov = io.msg.fast_iov;
			ret = recvmsg_copy_msghdr(&io.msg.msg, sr->msg,
						  sr->msg_flags, &io.msg.uaddr,
						  &io.msg.iov);
			if (ret)
				return ret;
		}

		flags = req->sr_msg.msg_flags;
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
						kmsg->uaddr, flags);
		if (force_nonblock && ret == -EAGAIN)
			return io_setup_async_msg(req, kmsg);
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	if (kmsg && kmsg->iov != kmsg->fast_iov)
		kfree(kmsg->iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_cqring_add_event(req, ret);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_recv(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
	struct io_buffer *kbuf = NULL;
	struct socket *sock;
	int ret, cflags = 0;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct io_sr_msg *sr = &req->sr_msg;
		void __user *buf = sr->buf;
		struct msghdr msg;
		struct iovec iov;
		unsigned flags;

		kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
		if (IS_ERR(kbuf))
			return PTR_ERR(kbuf);
		else if (kbuf)
			buf = u64_to_user_ptr(kbuf->addr);

		ret = import_single_range(READ, buf, sr->len, &iov,
					  &msg.msg_iter);
		if (ret) {
			kfree(kbuf);
			return ret;
		}

		req->flags |= REQ_F_NEED_CLEANUP;
		msg.msg_name = NULL;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_namelen = 0;
		msg.msg_iocb = NULL;
		msg.msg_flags = 0;

		flags = req->sr_msg.msg_flags;
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

		ret = sock_recvmsg(sock, &msg, flags);
		if (force_nonblock && ret == -EAGAIN)
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	kfree(kbuf);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	__io_cqring_add_event(req, ret, cflags);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
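
/*
 * Completion side of buffer select (illustrative sketch): when a buffer
 * was picked for the request, the CQE carries the buffer ID back to the
 * application in cqe->flags, as set up in io_recv_buffer_select():
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER) {
 *		int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		// data landed in the buffer registered under this bid
 *	}
 */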

static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	struct io_accept *accept = &req->accept;

	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

#if defined(CONFIG_NET)
static int __io_accept(struct io_kiocb *req, bool force_nonblock)
{
	struct io_accept *accept = &req->accept;
	unsigned file_flags;
	int ret;

	file_flags = force_nonblock ? O_NONBLOCK : 0;
	ret = __sys_accept4_file(req->file, file_flags, accept->addr,
				 accept->addr_len, accept->flags);
	if (ret == -EAGAIN && force_nonblock)
		return -EAGAIN;
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}

static void io_accept_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);

	if (io_req_cancelled(req))
		return;
	__io_accept(req, false);
	io_steal_work(req, workptr);
}
#endif

static int io_accept(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_NET)
	int ret;

	ret = __io_accept(req, force_nonblock);
	if (ret == -EAGAIN && force_nonblock) {
		req->work.func = io_accept_finish;
		return -EAGAIN;
	}
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
5d17b4a4 3739
3529d8c2 3740static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f499a021
JA
3741{
3742#if defined(CONFIG_NET)
3529d8c2
JA
3743 struct io_connect *conn = &req->connect;
3744 struct io_async_ctx *io = req->io;
f499a021 3745
3fbb51c1
JA
3746 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3747 return -EINVAL;
3748 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
3749 return -EINVAL;
3750
3529d8c2
JA
3751 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
3752 conn->addr_len = READ_ONCE(sqe->addr2);
3753
3754 if (!io)
3755 return 0;
3756
3757 return move_addr_to_kernel(conn->addr, conn->addr_len,
3fbb51c1 3758 &io->connect.address);
f499a021 3759#else
3fbb51c1 3760 return -EOPNOTSUPP;
f499a021
JA
3761#endif
3762}
3763
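/*
 * Issue a (possibly non-blocking) connect. The sockaddr is copied into
 * kernel memory once; if the attempt has to be retried, that copy is
 * preserved in the request's async context so userspace memory is not
 * re-read on the retry path.
 */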
014db007 3764static int io_connect(struct io_kiocb *req, bool force_nonblock)
f8e85cf2
JA
3765{
3766#if defined(CONFIG_NET)
f499a021 3767 struct io_async_ctx __io, *io;
f8e85cf2 3768 unsigned file_flags;
3fbb51c1 3769 int ret;
f8e85cf2 3770
f499a021
JA
3771 if (req->io) {
3772 io = req->io;
3773 } else {
3529d8c2
JA
3774 ret = move_addr_to_kernel(req->connect.addr,
3775 req->connect.addr_len,
3776 &__io.connect.address);
f499a021
JA
3777 if (ret)
3778 goto out;
3779 io = &__io;
3780 }
3781
3fbb51c1
JA
3782 file_flags = force_nonblock ? O_NONBLOCK : 0;
3783
3784 ret = __sys_connect_file(req->file, &io->connect.address,
3785 req->connect.addr_len, file_flags);
87f80d62 3786 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
b7bb4f7d
JA
3787 if (req->io)
3788 return -EAGAIN;
3789 if (io_alloc_async_ctx(req)) {
f499a021
JA
3790 ret = -ENOMEM;
3791 goto out;
3792 }
b7bb4f7d 3793 memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect));
f8e85cf2 3794 return -EAGAIN;
f499a021 3795 }
f8e85cf2
JA
3796 if (ret == -ERESTARTSYS)
3797 ret = -EINTR;
f499a021 3798out:
4e88d6e7
JA
3799 if (ret < 0)
3800 req_set_fail_links(req);
f8e85cf2 3801 io_cqring_add_event(req, ret);
014db007 3802 io_put_req(req);
f8e85cf2
JA
3803 return 0;
3804#else
3805 return -EOPNOTSUPP;
3806#endif
3807}
3808
d7718a9d
JA
3809struct io_poll_table {
3810 struct poll_table_struct pt;
3811 struct io_kiocb *req;
3812 int error;
3813};
3814
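/*
 * Called from vfs_poll() through the poll table: register our wait entry
 * on the file's waitqueue. Only a single waitqueue head per request is
 * supported here; a second head flags -EINVAL via pt->error.
 */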
3815static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
3816 struct wait_queue_head *head)
3817{
3818 if (unlikely(poll->head)) {
3819 pt->error = -EINVAL;
3820 return;
3821 }
3822
3823 pt->error = 0;
3824 poll->head = head;
3825 add_wait_queue(head, &poll->wait);
3826}
3827
3828static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
3829 struct poll_table_struct *p)
3830{
3831 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
3832
3833 __io_queue_proc(&pt->req->apoll->poll, pt, head);
3834}
3835
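/*
 * Common wakeup path for both poll flavours: verify the event mask,
 * unhook the wait entry, then punt completion to the submitting task
 * via task_work so the handler runs in task (not irq) context.
 */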
3836static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
3837 __poll_t mask, task_work_func_t func)
3838{
3839 struct task_struct *tsk;
3840
3841	/* for instances that support it, check for an event match first: */
3842 if (mask && !(mask & poll->events))
3843 return 0;
3844
3845 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
3846
3847 list_del_init(&poll->wait.entry);
3848
3849 tsk = req->task;
3850 req->result = mask;
3851 init_task_work(&req->task_work, func);
3852 /*
3853 * If this fails, then the task is exiting. If that is the case, then
3854 * the exit check will ultimately cancel these work items. Hence we
3855 * don't need to check here and handle it specifically.
3856 */
3857 task_work_add(tsk, &req->task_work, true);
3858 wake_up_process(tsk);
3859 return 1;
3860}
3861
3862static void io_async_task_func(struct callback_head *cb)
3863{
3864 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
3865 struct async_poll *apoll = req->apoll;
3866 struct io_ring_ctx *ctx = req->ctx;
3867
3868 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
3869
3870 WARN_ON_ONCE(!list_empty(&req->apoll->poll.wait.entry));
3871
3872 if (hash_hashed(&req->hash_node)) {
3873 spin_lock_irq(&ctx->completion_lock);
3874 hash_del(&req->hash_node);
3875 spin_unlock_irq(&ctx->completion_lock);
3876 }
3877
3878 /* restore ->work in case we need to retry again */
3879 memcpy(&req->work, &apoll->work, sizeof(req->work));
3880
3881 __set_current_state(TASK_RUNNING);
3882 mutex_lock(&ctx->uring_lock);
3883 __io_queue_sqe(req, NULL);
3884 mutex_unlock(&ctx->uring_lock);
3885
3886 kfree(apoll);
3887}
3888
3889static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
3890 void *key)
3891{
3892 struct io_kiocb *req = wait->private;
3893 struct io_poll_iocb *poll = &req->apoll->poll;
3894
3895 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
3896 key_to_poll(key));
3897
3898 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
3899}
3900
3901static void io_poll_req_insert(struct io_kiocb *req)
3902{
3903 struct io_ring_ctx *ctx = req->ctx;
3904 struct hlist_head *list;
3905
3906 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
3907 hlist_add_head(&req->hash_node, list);
3908}
3909
3910static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
3911 struct io_poll_iocb *poll,
3912 struct io_poll_table *ipt, __poll_t mask,
3913 wait_queue_func_t wake_func)
3914 __acquires(&ctx->completion_lock)
3915{
3916 struct io_ring_ctx *ctx = req->ctx;
3917 bool cancel = false;
3918
3919 poll->file = req->file;
3920 poll->head = NULL;
3921 poll->done = poll->canceled = false;
3922 poll->events = mask;
3923
3924 ipt->pt._key = mask;
3925 ipt->req = req;
3926 ipt->error = -EINVAL;
3927
3928 INIT_LIST_HEAD(&poll->wait.entry);
3929 init_waitqueue_func_entry(&poll->wait, wake_func);
3930 poll->wait.private = req;
3931
3932 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
3933
3934 spin_lock_irq(&ctx->completion_lock);
3935 if (likely(poll->head)) {
3936 spin_lock(&poll->head->lock);
3937 if (unlikely(list_empty(&poll->wait.entry))) {
3938 if (ipt->error)
3939 cancel = true;
3940 ipt->error = 0;
3941 mask = 0;
3942 }
3943 if (mask || ipt->error)
3944 list_del_init(&poll->wait.entry);
3945 else if (cancel)
3946 WRITE_ONCE(poll->canceled, true);
3947 else if (!poll->done) /* actually waiting for an event */
3948 io_poll_req_insert(req);
3949 spin_unlock(&poll->head->lock);
3950 }
3951
3952 return mask;
3953}
3954
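/*
 * Arm an internal poll handler for a request on a pollable file instead
 * of punting it straight to an async worker: when the file signals
 * readiness, the request is retried from task context. REQ_F_POLLED
 * ensures we only take this path once per request.
 */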
3955static bool io_arm_poll_handler(struct io_kiocb *req)
3956{
3957 const struct io_op_def *def = &io_op_defs[req->opcode];
3958 struct io_ring_ctx *ctx = req->ctx;
3959 struct async_poll *apoll;
3960 struct io_poll_table ipt;
3961 __poll_t mask, ret;
3962
3963 if (!req->file || !file_can_poll(req->file))
3964 return false;
3965 if (req->flags & (REQ_F_MUST_PUNT | REQ_F_POLLED))
3966 return false;
3967 if (!def->pollin && !def->pollout)
3968 return false;
3969
3970 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
3971 if (unlikely(!apoll))
3972 return false;
3973
3974 req->flags |= REQ_F_POLLED;
3975 memcpy(&apoll->work, &req->work, sizeof(req->work));
3976
3977 /*
3978	 * Don't need a reference here, as we're adding it to the task's
3979 * task_works list. If the task exits, the list is pruned.
3980 */
3981 req->task = current;
3982 req->apoll = apoll;
3983 INIT_HLIST_NODE(&req->hash_node);
3984
8755d97a 3985 mask = 0;
d7718a9d 3986 if (def->pollin)
8755d97a 3987 mask |= POLLIN | POLLRDNORM;
d7718a9d
JA
3988 if (def->pollout)
3989 mask |= POLLOUT | POLLWRNORM;
3990 mask |= POLLERR | POLLPRI;
3991
3992 ipt.pt._qproc = io_async_queue_proc;
3993
3994 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
3995 io_async_wake);
3996 if (ret) {
3997 ipt.error = 0;
3998 apoll->poll.done = true;
3999 spin_unlock_irq(&ctx->completion_lock);
4000 memcpy(&req->work, &apoll->work, sizeof(req->work));
4001 kfree(apoll);
4002 return false;
4003 }
4004 spin_unlock_irq(&ctx->completion_lock);
4005 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
4006 apoll->poll.events);
4007 return true;
4008}
4009
4010static bool __io_poll_remove_one(struct io_kiocb *req,
4011 struct io_poll_iocb *poll)
221c5eb2 4012{
b41e9852 4013 bool do_complete = false;
221c5eb2
JA
4014
4015 spin_lock(&poll->head->lock);
4016 WRITE_ONCE(poll->canceled, true);
392edb45
JA
4017 if (!list_empty(&poll->wait.entry)) {
4018 list_del_init(&poll->wait.entry);
b41e9852 4019 do_complete = true;
221c5eb2
JA
4020 }
4021 spin_unlock(&poll->head->lock);
d7718a9d
JA
4022 return do_complete;
4023}
4024
4025static bool io_poll_remove_one(struct io_kiocb *req)
4026{
4027 bool do_complete;
4028
4029 if (req->opcode == IORING_OP_POLL_ADD) {
4030 do_complete = __io_poll_remove_one(req, &req->poll);
4031 } else {
4032 /* non-poll requests have submit ref still */
4033 do_complete = __io_poll_remove_one(req, &req->apoll->poll);
4034 if (do_complete)
4035 io_put_req(req);
4036 }
4037
78076bb6 4038 hash_del(&req->hash_node);
d7718a9d 4039
b41e9852
JA
4040 if (do_complete) {
4041 io_cqring_fill_event(req, -ECANCELED);
4042 io_commit_cqring(req->ctx);
4043 req->flags |= REQ_F_COMP_LOCKED;
4044 io_put_req(req);
4045 }
4046
4047 return do_complete;
221c5eb2
JA
4048}
4049
4050static void io_poll_remove_all(struct io_ring_ctx *ctx)
4051{
78076bb6 4052 struct hlist_node *tmp;
221c5eb2 4053 struct io_kiocb *req;
78076bb6 4054 int i;
221c5eb2
JA
4055
4056 spin_lock_irq(&ctx->completion_lock);
78076bb6
JA
4057 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
4058 struct hlist_head *list;
4059
4060 list = &ctx->cancel_hash[i];
4061 hlist_for_each_entry_safe(req, tmp, list, hash_node)
4062 io_poll_remove_one(req);
221c5eb2
JA
4063 }
4064 spin_unlock_irq(&ctx->completion_lock);
b41e9852
JA
4065
4066 io_cqring_ev_posted(ctx);
221c5eb2
JA
4067}
4068
47f46768
JA
4069static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
4070{
78076bb6 4071 struct hlist_head *list;
47f46768
JA
4072 struct io_kiocb *req;
4073
78076bb6
JA
4074 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
4075 hlist_for_each_entry(req, list, hash_node) {
b41e9852
JA
4076 if (sqe_addr != req->user_data)
4077 continue;
4078 if (io_poll_remove_one(req))
eac406c6 4079 return 0;
b41e9852 4080 return -EALREADY;
47f46768
JA
4081 }
4082
4083 return -ENOENT;
4084}
4085
3529d8c2
JA
4086static int io_poll_remove_prep(struct io_kiocb *req,
4087 const struct io_uring_sqe *sqe)
0969e783 4088{
0969e783
JA
4089 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4090 return -EINVAL;
4091 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
4092 sqe->poll_events)
4093 return -EINVAL;
4094
4095 req->poll.addr = READ_ONCE(sqe->addr);
0969e783
JA
4096 return 0;
4097}
4098
221c5eb2
JA
4099/*
4100 * Find a running poll command that matches one specified in sqe->addr,
4101 * and remove it if found.
4102 */
fc4df999 4103static int io_poll_remove(struct io_kiocb *req)
221c5eb2
JA
4104{
4105 struct io_ring_ctx *ctx = req->ctx;
0969e783 4106 u64 addr;
47f46768 4107 int ret;
221c5eb2 4108
0969e783 4109 addr = req->poll.addr;
221c5eb2 4110 spin_lock_irq(&ctx->completion_lock);
0969e783 4111 ret = io_poll_cancel(ctx, addr);
221c5eb2
JA
4112 spin_unlock_irq(&ctx->completion_lock);
4113
78e19bbe 4114 io_cqring_add_event(req, ret);
4e88d6e7
JA
4115 if (ret < 0)
4116 req_set_fail_links(req);
e65ef56d 4117 io_put_req(req);
221c5eb2
JA
4118 return 0;
4119}
4120
b0dd8a41 4121static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
221c5eb2 4122{
a197f664
JL
4123 struct io_ring_ctx *ctx = req->ctx;
4124
8c838788 4125 req->poll.done = true;
b0a20349 4126 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
8c838788 4127 io_commit_cqring(ctx);
221c5eb2
JA
4128}
4129
b41e9852 4130static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
221c5eb2 4131{
221c5eb2 4132 struct io_ring_ctx *ctx = req->ctx;
221c5eb2 4133
221c5eb2 4134 spin_lock_irq(&ctx->completion_lock);
78076bb6 4135 hash_del(&req->hash_node);
b41e9852
JA
4136 io_poll_complete(req, req->result, 0);
4137 req->flags |= REQ_F_COMP_LOCKED;
4138 io_put_req_find_next(req, nxt);
e94f141b
JA
4139 spin_unlock_irq(&ctx->completion_lock);
4140
4141 io_cqring_ev_posted(ctx);
e94f141b
JA
4142}
4143
b41e9852 4144static void io_poll_task_func(struct callback_head *cb)
f0b493e6 4145{
b41e9852
JA
4146 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4147 struct io_kiocb *nxt = NULL;
f0b493e6 4148
b41e9852 4149 io_poll_task_handler(req, &nxt);
d7718a9d
JA
4150 if (nxt) {
4151 struct io_ring_ctx *ctx = nxt->ctx;
4152
4153 mutex_lock(&ctx->uring_lock);
b41e9852 4154 __io_queue_sqe(nxt, NULL);
d7718a9d
JA
4155 mutex_unlock(&ctx->uring_lock);
4156 }
f0b493e6
JA
4157}
4158
221c5eb2
JA
4159static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4160 void *key)
4161{
c2f2eb7d
JA
4162 struct io_kiocb *req = wait->private;
4163 struct io_poll_iocb *poll = &req->poll;
221c5eb2 4164
d7718a9d 4165 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
221c5eb2
JA
4166}
4167
221c5eb2
JA
4168static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
4169 struct poll_table_struct *p)
4170{
4171 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4172
d7718a9d 4173 __io_queue_proc(&pt->req->poll, pt, head);
eac406c6
JA
4174}
4175
3529d8c2 4176static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
4177{
4178 struct io_poll_iocb *poll = &req->poll;
221c5eb2 4179 u16 events;
221c5eb2
JA
4180
4181 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4182 return -EINVAL;
4183 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
4184 return -EINVAL;
09bb8394
JA
4185 if (!poll->file)
4186 return -EBADF;
221c5eb2 4187
221c5eb2
JA
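	/* as with poll(2), EPOLLERR and EPOLLHUP are always reported */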
4188 events = READ_ONCE(sqe->poll_events);
4189 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
b41e9852 4190
d7718a9d
JA
4191 /*
4192	 * Don't need a reference here, as we're adding it to the task's
4193 * task_works list. If the task exits, the list is pruned.
4194 */
b41e9852 4195 req->task = current;
0969e783
JA
4196 return 0;
4197}
4198
014db007 4199static int io_poll_add(struct io_kiocb *req)
0969e783
JA
4200{
4201 struct io_poll_iocb *poll = &req->poll;
4202 struct io_ring_ctx *ctx = req->ctx;
4203 struct io_poll_table ipt;
0969e783 4204 __poll_t mask;
0969e783 4205
78076bb6 4206 INIT_HLIST_NODE(&req->hash_node);
36703247 4207 INIT_LIST_HEAD(&req->list);
d7718a9d 4208 ipt.pt._qproc = io_poll_queue_proc;
36703247 4209
d7718a9d
JA
4210 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
4211 io_poll_wake);
221c5eb2 4212
8c838788 4213 if (mask) { /* no async, we'd stolen it */
221c5eb2 4214 ipt.error = 0;
b0dd8a41 4215 io_poll_complete(req, mask, 0);
221c5eb2 4216 }
221c5eb2
JA
4217 spin_unlock_irq(&ctx->completion_lock);
4218
8c838788
JA
4219 if (mask) {
4220 io_cqring_ev_posted(ctx);
014db007 4221 io_put_req(req);
221c5eb2 4222 }
8c838788 4223 return ipt.error;
221c5eb2
JA
4224}
4225
5262f567
JA
4226static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
4227{
ad8a48ac
JA
4228 struct io_timeout_data *data = container_of(timer,
4229 struct io_timeout_data, timer);
4230 struct io_kiocb *req = data->req;
4231 struct io_ring_ctx *ctx = req->ctx;
5262f567
JA
4232 unsigned long flags;
4233
5262f567
JA
4234 atomic_inc(&ctx->cq_timeouts);
4235
4236 spin_lock_irqsave(&ctx->completion_lock, flags);
ef03681a 4237 /*
11365043
JA
4238 * We could be racing with timeout deletion. If the list is empty,
4239 * then timeout lookup already found it and will be handling it.
ef03681a 4240 */
842f9612 4241 if (!list_empty(&req->list)) {
11365043 4242 struct io_kiocb *prev;
5262f567 4243
11365043
JA
4244 /*
4245	 * Adjust the sequence of the reqs before the current one, because
d195a66e 4246	 * this req will consume a slot in the cq_ring and the cq_tail
11365043
JA
4247	 * pointer will be advanced; otherwise other timeout reqs may
4248	 * complete early, without waiting for enough wait_nr.
4249 */
4250 prev = req;
4251 list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
4252 prev->sequence++;
11365043 4253 list_del_init(&req->list);
11365043 4254 }
5262f567 4255
78e19bbe 4256 io_cqring_fill_event(req, -ETIME);
5262f567
JA
4257 io_commit_cqring(ctx);
4258 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4259
4260 io_cqring_ev_posted(ctx);
4e88d6e7 4261 req_set_fail_links(req);
5262f567
JA
4262 io_put_req(req);
4263 return HRTIMER_NORESTART;
4264}
4265
47f46768
JA
4266static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
4267{
4268 struct io_kiocb *req;
4269 int ret = -ENOENT;
4270
4271 list_for_each_entry(req, &ctx->timeout_list, list) {
4272 if (user_data == req->user_data) {
4273 list_del_init(&req->list);
4274 ret = 0;
4275 break;
4276 }
4277 }
4278
4279 if (ret == -ENOENT)
4280 return ret;
4281
2d28390a 4282 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
47f46768
JA
4283 if (ret == -1)
4284 return -EALREADY;
4285
4e88d6e7 4286 req_set_fail_links(req);
47f46768
JA
4287 io_cqring_fill_event(req, -ECANCELED);
4288 io_put_req(req);
4289 return 0;
4290}
4291
3529d8c2
JA
4292static int io_timeout_remove_prep(struct io_kiocb *req,
4293 const struct io_uring_sqe *sqe)
b29472ee 4294{
b29472ee
JA
4295 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4296 return -EINVAL;
4297 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
4298 return -EINVAL;
4299
4300 req->timeout.addr = READ_ONCE(sqe->addr);
4301 req->timeout.flags = READ_ONCE(sqe->timeout_flags);
4302 if (req->timeout.flags)
4303 return -EINVAL;
4304
b29472ee
JA
4305 return 0;
4306}
4307
11365043
JA
4308/*
4309 * Remove or update an existing timeout command
4310 */
fc4df999 4311static int io_timeout_remove(struct io_kiocb *req)
11365043
JA
4312{
4313 struct io_ring_ctx *ctx = req->ctx;
47f46768 4314 int ret;
11365043 4315
11365043 4316 spin_lock_irq(&ctx->completion_lock);
b29472ee 4317 ret = io_timeout_cancel(ctx, req->timeout.addr);
11365043 4318
47f46768 4319 io_cqring_fill_event(req, ret);
11365043
JA
4320 io_commit_cqring(ctx);
4321 spin_unlock_irq(&ctx->completion_lock);
5262f567 4322 io_cqring_ev_posted(ctx);
4e88d6e7
JA
4323 if (ret < 0)
4324 req_set_fail_links(req);
ec9c02ad 4325 io_put_req(req);
11365043 4326 return 0;
5262f567
JA
4327}
4328
3529d8c2 4329static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2d28390a 4330 bool is_timeout_link)
5262f567 4331{
ad8a48ac 4332 struct io_timeout_data *data;
a41525ab 4333 unsigned flags;
5262f567 4334
ad8a48ac 4335 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5262f567 4336 return -EINVAL;
ad8a48ac 4337 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
a41525ab 4338 return -EINVAL;
2d28390a
JA
4339 if (sqe->off && is_timeout_link)
4340 return -EINVAL;
a41525ab
JA
4341 flags = READ_ONCE(sqe->timeout_flags);
4342 if (flags & ~IORING_TIMEOUT_ABS)
5262f567 4343 return -EINVAL;
bdf20073 4344
26a61679
JA
4345 req->timeout.count = READ_ONCE(sqe->off);
4346
3529d8c2 4347 if (!req->io && io_alloc_async_ctx(req))
26a61679
JA
4348 return -ENOMEM;
4349
4350 data = &req->io->timeout;
ad8a48ac 4351 data->req = req;
ad8a48ac
JA
4352 req->flags |= REQ_F_TIMEOUT;
4353
4354 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
4355 return -EFAULT;
4356
11365043 4357 if (flags & IORING_TIMEOUT_ABS)
ad8a48ac 4358 data->mode = HRTIMER_MODE_ABS;
11365043 4359 else
ad8a48ac 4360 data->mode = HRTIMER_MODE_REL;
11365043 4361
ad8a48ac
JA
4362 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
4363 return 0;
4364}
4365
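/*
 * Queue a timeout. A non-zero sqe->off means "fire after that many
 * completions"; such requests are insertion-sorted into ->timeout_list
 * by target sequence, with the long-long arithmetic below guarding
 * against cached_sq_head wrapping around.
 */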
fc4df999 4366static int io_timeout(struct io_kiocb *req)
ad8a48ac
JA
4367{
4368 unsigned count;
4369 struct io_ring_ctx *ctx = req->ctx;
4370 struct io_timeout_data *data;
4371 struct list_head *entry;
4372 unsigned span = 0;
ad8a48ac 4373
2d28390a 4374 data = &req->io->timeout;
93bd25bb 4375
5262f567
JA
4376 /*
4377	 * sqe->off holds how many events need to occur for this
93bd25bb
JA
4378 * timeout event to be satisfied. If it isn't set, then this is
4379	 * a pure timeout request, and the sequence isn't used.
5262f567 4380 */
26a61679 4381 count = req->timeout.count;
93bd25bb
JA
4382 if (!count) {
4383 req->flags |= REQ_F_TIMEOUT_NOSEQ;
4384 spin_lock_irq(&ctx->completion_lock);
4385 entry = ctx->timeout_list.prev;
4386 goto add;
4387 }
5262f567
JA
4388
4389 req->sequence = ctx->cached_sq_head + count - 1;
2d28390a 4390 data->seq_offset = count;
5262f567
JA
4391
4392 /*
4393 * Insertion sort, ensuring the first entry in the list is always
4394 * the one we need first.
4395 */
5262f567
JA
4396 spin_lock_irq(&ctx->completion_lock);
4397 list_for_each_prev(entry, &ctx->timeout_list) {
4398 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
5da0fb1a 4399 unsigned nxt_sq_head;
4400 long long tmp, tmp_nxt;
2d28390a 4401 u32 nxt_offset = nxt->io->timeout.seq_offset;
5262f567 4402
93bd25bb
JA
4403 if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
4404 continue;
4405
5da0fb1a 4406 /*
4407 * Since cached_sq_head + count - 1 can overflow, use type long
4408 * long to store it.
4409 */
4410 tmp = (long long)ctx->cached_sq_head + count - 1;
cc42e0ac
PB
4411 nxt_sq_head = nxt->sequence - nxt_offset + 1;
4412 tmp_nxt = (long long)nxt_sq_head + nxt_offset - 1;
5da0fb1a 4413
4414 /*
4415 * cached_sq_head may overflow, and it will never overflow twice
4416 * once there is some timeout req still be valid.
4417 */
4418 if (ctx->cached_sq_head < nxt_sq_head)
8b07a65a 4419 tmp += UINT_MAX;
5da0fb1a 4420
a1f58ba4 4421 if (tmp > tmp_nxt)
5262f567 4422 break;
a1f58ba4 4423
4424 /*
4425	 * The sequence of the reqs after the inserted one, and of the
4426	 * inserted req itself, must be adjusted because each timeout req
4426	 * consumes a slot.
4427 */
4428 span++;
4429 nxt->sequence++;
5262f567 4430 }
a1f58ba4 4431 req->sequence -= span;
93bd25bb 4432add:
5262f567 4433 list_add(&req->list, entry);
ad8a48ac
JA
4434 data->timer.function = io_timeout_fn;
4435 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5262f567 4436 spin_unlock_irq(&ctx->completion_lock);
5262f567
JA
4437 return 0;
4438}
5262f567 4439
62755e35
JA
4440static bool io_cancel_cb(struct io_wq_work *work, void *data)
4441{
4442 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
4443
4444 return req->user_data == (unsigned long) data;
4445}
4446
e977d6d3 4447static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
62755e35 4448{
62755e35 4449 enum io_wq_cancel cancel_ret;
62755e35
JA
4450 int ret = 0;
4451
62755e35
JA
4452 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
4453 switch (cancel_ret) {
4454 case IO_WQ_CANCEL_OK:
4455 ret = 0;
4456 break;
4457 case IO_WQ_CANCEL_RUNNING:
4458 ret = -EALREADY;
4459 break;
4460 case IO_WQ_CANCEL_NOTFOUND:
4461 ret = -ENOENT;
4462 break;
4463 }
4464
e977d6d3
JA
4465 return ret;
4466}
4467
47f46768
JA
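/*
 * Cancel lookup order: the io-wq worker queues first, then pending
 * timeouts, then armed poll requests. The first attempt that doesn't
 * return -ENOENT decides the result posted to the CQ ring.
 */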
4468static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
4469 struct io_kiocb *req, __u64 sqe_addr,
014db007 4470 int success_ret)
47f46768
JA
4471{
4472 unsigned long flags;
4473 int ret;
4474
4475 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
4476 if (ret != -ENOENT) {
4477 spin_lock_irqsave(&ctx->completion_lock, flags);
4478 goto done;
4479 }
4480
4481 spin_lock_irqsave(&ctx->completion_lock, flags);
4482 ret = io_timeout_cancel(ctx, sqe_addr);
4483 if (ret != -ENOENT)
4484 goto done;
4485 ret = io_poll_cancel(ctx, sqe_addr);
4486done:
b0dd8a41
JA
4487 if (!ret)
4488 ret = success_ret;
47f46768
JA
4489 io_cqring_fill_event(req, ret);
4490 io_commit_cqring(ctx);
4491 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4492 io_cqring_ev_posted(ctx);
4493
4e88d6e7
JA
4494 if (ret < 0)
4495 req_set_fail_links(req);
014db007 4496 io_put_req(req);
47f46768
JA
4497}
4498
3529d8c2
JA
4499static int io_async_cancel_prep(struct io_kiocb *req,
4500 const struct io_uring_sqe *sqe)
e977d6d3 4501{
fbf23849 4502 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
e977d6d3
JA
4503 return -EINVAL;
4504 if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
4505 sqe->cancel_flags)
4506 return -EINVAL;
4507
fbf23849
JA
4508 req->cancel.addr = READ_ONCE(sqe->addr);
4509 return 0;
4510}
4511
014db007 4512static int io_async_cancel(struct io_kiocb *req)
fbf23849
JA
4513{
4514 struct io_ring_ctx *ctx = req->ctx;
fbf23849 4515
014db007 4516 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
5262f567
JA
4517 return 0;
4518}
4519
05f3fb3c
JA
4520static int io_files_update_prep(struct io_kiocb *req,
4521 const struct io_uring_sqe *sqe)
4522{
4523 if (sqe->flags || sqe->ioprio || sqe->rw_flags)
4524 return -EINVAL;
4525
4526 req->files_update.offset = READ_ONCE(sqe->off);
4527 req->files_update.nr_args = READ_ONCE(sqe->len);
4528 if (!req->files_update.nr_args)
4529 return -EINVAL;
4530 req->files_update.arg = READ_ONCE(sqe->addr);
4531 return 0;
4532}
4533
4534static int io_files_update(struct io_kiocb *req, bool force_nonblock)
fbf23849
JA
4535{
4536 struct io_ring_ctx *ctx = req->ctx;
05f3fb3c
JA
4537 struct io_uring_files_update up;
4538 int ret;
fbf23849 4539
f86cd20c 4540 if (force_nonblock)
05f3fb3c 4541 return -EAGAIN;
05f3fb3c
JA
4542
4543 up.offset = req->files_update.offset;
4544 up.fds = req->files_update.arg;
4545
4546 mutex_lock(&ctx->uring_lock);
4547 ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
4548 mutex_unlock(&ctx->uring_lock);
4549
4550 if (ret < 0)
4551 req_set_fail_links(req);
4552 io_cqring_add_event(req, ret);
4553 io_put_req(req);
5262f567
JA
4554 return 0;
4555}
4556
3529d8c2
JA
4557static int io_req_defer_prep(struct io_kiocb *req,
4558 const struct io_uring_sqe *sqe)
f67676d1 4559{
e781573e 4560 ssize_t ret = 0;
f67676d1 4561
f86cd20c
JA
4562 if (io_op_defs[req->opcode].file_table) {
4563 ret = io_grab_files(req);
4564 if (unlikely(ret))
4565 return ret;
4566 }
4567
cccf0ee8
JA
4568 io_req_work_grab_env(req, &io_op_defs[req->opcode]);
4569
d625c6ee 4570 switch (req->opcode) {
e781573e
JA
4571 case IORING_OP_NOP:
4572 break;
f67676d1
JA
4573 case IORING_OP_READV:
4574 case IORING_OP_READ_FIXED:
3a6820f2 4575 case IORING_OP_READ:
3529d8c2 4576 ret = io_read_prep(req, sqe, true);
f67676d1
JA
4577 break;
4578 case IORING_OP_WRITEV:
4579 case IORING_OP_WRITE_FIXED:
3a6820f2 4580 case IORING_OP_WRITE:
3529d8c2 4581 ret = io_write_prep(req, sqe, true);
f67676d1 4582 break;
0969e783 4583 case IORING_OP_POLL_ADD:
3529d8c2 4584 ret = io_poll_add_prep(req, sqe);
0969e783
JA
4585 break;
4586 case IORING_OP_POLL_REMOVE:
3529d8c2 4587 ret = io_poll_remove_prep(req, sqe);
0969e783 4588 break;
8ed8d3c3 4589 case IORING_OP_FSYNC:
3529d8c2 4590 ret = io_prep_fsync(req, sqe);
8ed8d3c3
JA
4591 break;
4592 case IORING_OP_SYNC_FILE_RANGE:
3529d8c2 4593 ret = io_prep_sfr(req, sqe);
8ed8d3c3 4594 break;
03b1230c 4595 case IORING_OP_SENDMSG:
fddaface 4596 case IORING_OP_SEND:
3529d8c2 4597 ret = io_sendmsg_prep(req, sqe);
03b1230c
JA
4598 break;
4599 case IORING_OP_RECVMSG:
fddaface 4600 case IORING_OP_RECV:
3529d8c2 4601 ret = io_recvmsg_prep(req, sqe);
03b1230c 4602 break;
f499a021 4603 case IORING_OP_CONNECT:
3529d8c2 4604 ret = io_connect_prep(req, sqe);
f499a021 4605 break;
2d28390a 4606 case IORING_OP_TIMEOUT:
3529d8c2 4607 ret = io_timeout_prep(req, sqe, false);
b7bb4f7d 4608 break;
b29472ee 4609 case IORING_OP_TIMEOUT_REMOVE:
3529d8c2 4610 ret = io_timeout_remove_prep(req, sqe);
b29472ee 4611 break;
fbf23849 4612 case IORING_OP_ASYNC_CANCEL:
3529d8c2 4613 ret = io_async_cancel_prep(req, sqe);
fbf23849 4614 break;
2d28390a 4615 case IORING_OP_LINK_TIMEOUT:
3529d8c2 4616 ret = io_timeout_prep(req, sqe, true);
b7bb4f7d 4617 break;
8ed8d3c3 4618 case IORING_OP_ACCEPT:
3529d8c2 4619 ret = io_accept_prep(req, sqe);
8ed8d3c3 4620 break;
d63d1b5e
JA
4621 case IORING_OP_FALLOCATE:
4622 ret = io_fallocate_prep(req, sqe);
4623 break;
15b71abe
JA
4624 case IORING_OP_OPENAT:
4625 ret = io_openat_prep(req, sqe);
4626 break;
b5dba59e
JA
4627 case IORING_OP_CLOSE:
4628 ret = io_close_prep(req, sqe);
4629 break;
05f3fb3c
JA
4630 case IORING_OP_FILES_UPDATE:
4631 ret = io_files_update_prep(req, sqe);
4632 break;
eddc7ef5
JA
4633 case IORING_OP_STATX:
4634 ret = io_statx_prep(req, sqe);
4635 break;
4840e418
JA
4636 case IORING_OP_FADVISE:
4637 ret = io_fadvise_prep(req, sqe);
4638 break;
c1ca757b
JA
4639 case IORING_OP_MADVISE:
4640 ret = io_madvise_prep(req, sqe);
4641 break;
cebdb986
JA
4642 case IORING_OP_OPENAT2:
4643 ret = io_openat2_prep(req, sqe);
4644 break;
3e4827b0
JA
4645 case IORING_OP_EPOLL_CTL:
4646 ret = io_epoll_ctl_prep(req, sqe);
4647 break;
7d67af2c
PB
4648 case IORING_OP_SPLICE:
4649 ret = io_splice_prep(req, sqe);
4650 break;
ddf0322d
JA
4651 case IORING_OP_PROVIDE_BUFFERS:
4652 ret = io_provide_buffers_prep(req, sqe);
4653 break;
f67676d1 4654 default:
e781573e
JA
4655 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
4656 req->opcode);
4657 ret = -EINVAL;
b7bb4f7d 4658 break;
f67676d1
JA
4659 }
4660
b7bb4f7d 4661 return ret;
f67676d1
JA
4662}
4663
3529d8c2 4664static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
de0617e4 4665{
a197f664 4666 struct io_ring_ctx *ctx = req->ctx;
f67676d1 4667 int ret;
de0617e4 4668
9d858b21
BL
4669	/* Still need to defer if there are pending reqs in the defer list. */
4670 if (!req_need_defer(req) && list_empty(&ctx->defer_list))
de0617e4
JA
4671 return 0;
4672
3529d8c2 4673 if (!req->io && io_alloc_async_ctx(req))
de0617e4
JA
4674 return -EAGAIN;
4675
3529d8c2 4676 ret = io_req_defer_prep(req, sqe);
b7bb4f7d 4677 if (ret < 0)
2d28390a 4678 return ret;
2d28390a 4679
de0617e4 4680 spin_lock_irq(&ctx->completion_lock);
9d858b21 4681 if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
de0617e4 4682 spin_unlock_irq(&ctx->completion_lock);
de0617e4
JA
4683 return 0;
4684 }
4685
915967f6 4686 trace_io_uring_defer(ctx, req, req->user_data);
de0617e4
JA
4687 list_add_tail(&req->list, &ctx->defer_list);
4688 spin_unlock_irq(&ctx->completion_lock);
4689 return -EIOCBQUEUED;
4690}
4691
99bc4c38
PB
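/*
 * Release per-opcode resources still attached to the request (selected
 * buffers, copied iovecs, open filenames, spliced files) and clear
 * REQ_F_NEED_CLEANUP once everything has been freed.
 */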
4692static void io_cleanup_req(struct io_kiocb *req)
4693{
4694 struct io_async_ctx *io = req->io;
4695
4696 switch (req->opcode) {
4697 case IORING_OP_READV:
4698 case IORING_OP_READ_FIXED:
4699 case IORING_OP_READ:
bcda7baa
JA
4700 if (req->flags & REQ_F_BUFFER_SELECTED)
4701 kfree((void *)(unsigned long)req->rw.addr);
4702 /* fallthrough */
99bc4c38
PB
4703 case IORING_OP_WRITEV:
4704 case IORING_OP_WRITE_FIXED:
4705 case IORING_OP_WRITE:
4706 if (io->rw.iov != io->rw.fast_iov)
4707 kfree(io->rw.iov);
4708 break;
4709 case IORING_OP_SENDMSG:
4710 case IORING_OP_RECVMSG:
4711 if (io->msg.iov != io->msg.fast_iov)
4712 kfree(io->msg.iov);
4713 break;
bcda7baa
JA
4714 case IORING_OP_RECV:
4715 if (req->flags & REQ_F_BUFFER_SELECTED)
4716 kfree(req->sr_msg.kbuf);
4717 break;
8fef80bf
PB
4718 case IORING_OP_OPENAT:
4719 case IORING_OP_OPENAT2:
4720 case IORING_OP_STATX:
4721 putname(req->open.filename);
4722 break;
7d67af2c
PB
4723 case IORING_OP_SPLICE:
4724 io_put_file(req, req->splice.file_in,
4725 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
4726 break;
99bc4c38
PB
4727 }
4728
4729 req->flags &= ~REQ_F_NEED_CLEANUP;
4730}
4731
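/*
 * Central dispatch. A non-NULL sqe means inline submission, so prep is
 * done here; a NULL sqe means the request was prepped earlier (deferred
 * or punted to async context). For IORING_SETUP_IOPOLL rings, issued
 * requests are then added to the poll list under the uring_lock.
 */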
3529d8c2 4732static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
014db007 4733 bool force_nonblock)
2b188cc1 4734{
a197f664 4735 struct io_ring_ctx *ctx = req->ctx;
d625c6ee 4736 int ret;
2b188cc1 4737
d625c6ee 4738 switch (req->opcode) {
2b188cc1 4739 case IORING_OP_NOP:
78e19bbe 4740 ret = io_nop(req);
2b188cc1
JA
4741 break;
4742 case IORING_OP_READV:
edafccee 4743 case IORING_OP_READ_FIXED:
3a6820f2 4744 case IORING_OP_READ:
3529d8c2
JA
4745 if (sqe) {
4746 ret = io_read_prep(req, sqe, force_nonblock);
4747 if (ret < 0)
4748 break;
4749 }
014db007 4750 ret = io_read(req, force_nonblock);
edafccee 4751 break;
3529d8c2 4752 case IORING_OP_WRITEV:
edafccee 4753 case IORING_OP_WRITE_FIXED:
3a6820f2 4754 case IORING_OP_WRITE:
3529d8c2
JA
4755 if (sqe) {
4756 ret = io_write_prep(req, sqe, force_nonblock);
4757 if (ret < 0)
4758 break;
4759 }
014db007 4760 ret = io_write(req, force_nonblock);
2b188cc1 4761 break;
c992fe29 4762 case IORING_OP_FSYNC:
3529d8c2
JA
4763 if (sqe) {
4764 ret = io_prep_fsync(req, sqe);
4765 if (ret < 0)
4766 break;
4767 }
014db007 4768 ret = io_fsync(req, force_nonblock);
c992fe29 4769 break;
221c5eb2 4770 case IORING_OP_POLL_ADD:
3529d8c2
JA
4771 if (sqe) {
4772 ret = io_poll_add_prep(req, sqe);
4773 if (ret)
4774 break;
4775 }
014db007 4776 ret = io_poll_add(req);
221c5eb2
JA
4777 break;
4778 case IORING_OP_POLL_REMOVE:
3529d8c2
JA
4779 if (sqe) {
4780 ret = io_poll_remove_prep(req, sqe);
4781 if (ret < 0)
4782 break;
4783 }
fc4df999 4784 ret = io_poll_remove(req);
221c5eb2 4785 break;
5d17b4a4 4786 case IORING_OP_SYNC_FILE_RANGE:
3529d8c2
JA
4787 if (sqe) {
4788 ret = io_prep_sfr(req, sqe);
4789 if (ret < 0)
4790 break;
4791 }
014db007 4792 ret = io_sync_file_range(req, force_nonblock);
5d17b4a4 4793 break;
0fa03c62 4794 case IORING_OP_SENDMSG:
fddaface 4795 case IORING_OP_SEND:
3529d8c2
JA
4796 if (sqe) {
4797 ret = io_sendmsg_prep(req, sqe);
4798 if (ret < 0)
4799 break;
4800 }
fddaface 4801 if (req->opcode == IORING_OP_SENDMSG)
014db007 4802 ret = io_sendmsg(req, force_nonblock);
fddaface 4803 else
014db007 4804 ret = io_send(req, force_nonblock);
0fa03c62 4805 break;
aa1fa28f 4806 case IORING_OP_RECVMSG:
fddaface 4807 case IORING_OP_RECV:
3529d8c2
JA
4808 if (sqe) {
4809 ret = io_recvmsg_prep(req, sqe);
4810 if (ret)
4811 break;
4812 }
fddaface 4813 if (req->opcode == IORING_OP_RECVMSG)
014db007 4814 ret = io_recvmsg(req, force_nonblock);
fddaface 4815 else
014db007 4816 ret = io_recv(req, force_nonblock);
aa1fa28f 4817 break;
5262f567 4818 case IORING_OP_TIMEOUT:
3529d8c2
JA
4819 if (sqe) {
4820 ret = io_timeout_prep(req, sqe, false);
4821 if (ret)
4822 break;
4823 }
fc4df999 4824 ret = io_timeout(req);
5262f567 4825 break;
11365043 4826 case IORING_OP_TIMEOUT_REMOVE:
3529d8c2
JA
4827 if (sqe) {
4828 ret = io_timeout_remove_prep(req, sqe);
4829 if (ret)
4830 break;
4831 }
fc4df999 4832 ret = io_timeout_remove(req);
11365043 4833 break;
17f2fe35 4834 case IORING_OP_ACCEPT:
3529d8c2
JA
4835 if (sqe) {
4836 ret = io_accept_prep(req, sqe);
4837 if (ret)
4838 break;
4839 }
014db007 4840 ret = io_accept(req, force_nonblock);
17f2fe35 4841 break;
f8e85cf2 4842 case IORING_OP_CONNECT:
3529d8c2
JA
4843 if (sqe) {
4844 ret = io_connect_prep(req, sqe);
4845 if (ret)
4846 break;
4847 }
014db007 4848 ret = io_connect(req, force_nonblock);
f8e85cf2 4849 break;
62755e35 4850 case IORING_OP_ASYNC_CANCEL:
3529d8c2
JA
4851 if (sqe) {
4852 ret = io_async_cancel_prep(req, sqe);
4853 if (ret)
4854 break;
4855 }
014db007 4856 ret = io_async_cancel(req);
62755e35 4857 break;
d63d1b5e
JA
4858 case IORING_OP_FALLOCATE:
4859 if (sqe) {
4860 ret = io_fallocate_prep(req, sqe);
4861 if (ret)
4862 break;
4863 }
014db007 4864 ret = io_fallocate(req, force_nonblock);
d63d1b5e 4865 break;
15b71abe
JA
4866 case IORING_OP_OPENAT:
4867 if (sqe) {
4868 ret = io_openat_prep(req, sqe);
4869 if (ret)
4870 break;
4871 }
014db007 4872 ret = io_openat(req, force_nonblock);
15b71abe 4873 break;
b5dba59e
JA
4874 case IORING_OP_CLOSE:
4875 if (sqe) {
4876 ret = io_close_prep(req, sqe);
4877 if (ret)
4878 break;
4879 }
014db007 4880 ret = io_close(req, force_nonblock);
b5dba59e 4881 break;
05f3fb3c
JA
4882 case IORING_OP_FILES_UPDATE:
4883 if (sqe) {
4884 ret = io_files_update_prep(req, sqe);
4885 if (ret)
4886 break;
4887 }
4888 ret = io_files_update(req, force_nonblock);
4889 break;
eddc7ef5
JA
4890 case IORING_OP_STATX:
4891 if (sqe) {
4892 ret = io_statx_prep(req, sqe);
4893 if (ret)
4894 break;
4895 }
014db007 4896 ret = io_statx(req, force_nonblock);
eddc7ef5 4897 break;
4840e418
JA
4898 case IORING_OP_FADVISE:
4899 if (sqe) {
4900 ret = io_fadvise_prep(req, sqe);
4901 if (ret)
4902 break;
4903 }
014db007 4904 ret = io_fadvise(req, force_nonblock);
4840e418 4905 break;
c1ca757b
JA
4906 case IORING_OP_MADVISE:
4907 if (sqe) {
4908 ret = io_madvise_prep(req, sqe);
4909 if (ret)
4910 break;
4911 }
014db007 4912 ret = io_madvise(req, force_nonblock);
c1ca757b 4913 break;
cebdb986
JA
4914 case IORING_OP_OPENAT2:
4915 if (sqe) {
4916 ret = io_openat2_prep(req, sqe);
4917 if (ret)
4918 break;
4919 }
014db007 4920 ret = io_openat2(req, force_nonblock);
cebdb986 4921 break;
3e4827b0
JA
4922 case IORING_OP_EPOLL_CTL:
4923 if (sqe) {
4924 ret = io_epoll_ctl_prep(req, sqe);
4925 if (ret)
4926 break;
4927 }
014db007 4928 ret = io_epoll_ctl(req, force_nonblock);
3e4827b0 4929 break;
7d67af2c
PB
4930 case IORING_OP_SPLICE:
4931 if (sqe) {
4932 ret = io_splice_prep(req, sqe);
4933 if (ret < 0)
4934 break;
4935 }
014db007 4936 ret = io_splice(req, force_nonblock);
7d67af2c 4937 break;
ddf0322d
JA
4938 case IORING_OP_PROVIDE_BUFFERS:
4939 if (sqe) {
4940 ret = io_provide_buffers_prep(req, sqe);
4941 if (ret)
4942 break;
4943 }
4944 ret = io_provide_buffers(req, force_nonblock);
4945 break;
2b188cc1
JA
4946 default:
4947 ret = -EINVAL;
4948 break;
4949 }
4950
def596e9
JA
4951 if (ret)
4952 return ret;
4953
4954 if (ctx->flags & IORING_SETUP_IOPOLL) {
11ba820b
JA
4955 const bool in_async = io_wq_current_is_worker();
4956
9e645e11 4957 if (req->result == -EAGAIN)
def596e9
JA
4958 return -EAGAIN;
4959
11ba820b
JA
4960 /* workqueue context doesn't hold uring_lock, grab it now */
4961 if (in_async)
4962 mutex_lock(&ctx->uring_lock);
4963
def596e9 4964 io_iopoll_req_issued(req);
11ba820b
JA
4965
4966 if (in_async)
4967 mutex_unlock(&ctx->uring_lock);
def596e9
JA
4968 }
4969
4970 return 0;
2b188cc1
JA
4971}
4972
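/*
 * Entry point for requests running in io-wq worker context: issue with
 * force_nonblock == false, looping on -EAGAIN since polled IO can still
 * fail to get a request slot on the block side.
 */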
561fb04a 4973static void io_wq_submit_work(struct io_wq_work **workptr)
2b188cc1 4974{
561fb04a 4975 struct io_wq_work *work = *workptr;
2b188cc1 4976 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
561fb04a 4977 int ret = 0;
2b188cc1 4978
0c9d5ccd
JA
4979 /* if NO_CANCEL is set, we must still run the work */
4980 if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
4981 IO_WQ_WORK_CANCEL) {
561fb04a 4982 ret = -ECANCELED;
0c9d5ccd 4983 }
31b51510 4984
561fb04a 4985 if (!ret) {
561fb04a 4986 do {
014db007 4987 ret = io_issue_sqe(req, NULL, false);
561fb04a
JA
4988 /*
4989 * We can get EAGAIN for polled IO even though we're
4990 * forcing a sync submission from here, since we can't
4991 * wait for request slots on the block side.
4992 */
4993 if (ret != -EAGAIN)
4994 break;
4995 cond_resched();
4996 } while (1);
4997 }
31b51510 4998
561fb04a 4999 if (ret) {
4e88d6e7 5000 req_set_fail_links(req);
78e19bbe 5001 io_cqring_add_event(req, ret);
817869d2 5002 io_put_req(req);
edafccee 5003 }
2b188cc1 5004
e9fd9396 5005 io_steal_work(req, workptr);
2b188cc1
JA
5006}
5007
15b71abe 5008static int io_req_needs_file(struct io_kiocb *req, int fd)
9e3aa61a 5009{
d3656344 5010 if (!io_op_defs[req->opcode].needs_file)
9e3aa61a 5011 return 0;
0b5faf6b 5012 if ((fd == -1 || fd == AT_FDCWD) && io_op_defs[req->opcode].fd_non_neg)
d3656344
JA
5013 return 0;
5014 return 1;
09bb8394
JA
5015}
5016
65e19f54
JA
5017static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
5018 int index)
5019{
5020 struct fixed_file_table *table;
5021
05f3fb3c
JA
5022 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
5023	return table->files[index & IORING_FILE_TABLE_MASK];
65e19f54
JA
5024}
5025
8da11c19
PB
5026static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
5027 int fd, struct file **out_file, bool fixed)
09bb8394 5028{
a197f664 5029 struct io_ring_ctx *ctx = req->ctx;
8da11c19 5030 struct file *file;
09bb8394 5031
8da11c19 5032 if (fixed) {
05f3fb3c 5033 if (unlikely(!ctx->file_data ||
09bb8394
JA
5034 (unsigned) fd >= ctx->nr_user_files))
5035 return -EBADF;
b7620121 5036 fd = array_index_nospec(fd, ctx->nr_user_files);
8da11c19
PB
5037 file = io_file_from_index(ctx, fd);
5038 if (!file)
08a45173 5039 return -EBADF;
05f3fb3c 5040 percpu_ref_get(&ctx->file_data->refs);
09bb8394 5041 } else {
c826bd7a 5042 trace_io_uring_file_get(ctx, fd);
8da11c19
PB
5043 file = __io_file_get(state, fd);
5044 if (unlikely(!file))
09bb8394
JA
5045 return -EBADF;
5046 }
5047
8da11c19 5048 *out_file = file;
09bb8394
JA
5049 return 0;
5050}
5051
8da11c19
PB
5052static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
5053 const struct io_uring_sqe *sqe)
5054{
5055 unsigned flags;
5056 int fd;
5057 bool fixed;
5058
5059 flags = READ_ONCE(sqe->flags);
5060 fd = READ_ONCE(sqe->fd);
5061
5062 if (!io_req_needs_file(req, fd))
5063 return 0;
5064
5065 fixed = (flags & IOSQE_FIXED_FILE);
5066 if (unlikely(!fixed && req->needs_fixed_file))
5067 return -EBADF;
5068
5069 return io_file_get(state, req, fd, &req->file, fixed);
5070}
5071
a197f664 5072static int io_grab_files(struct io_kiocb *req)
fcb323cc
JA
5073{
5074 int ret = -EBADF;
a197f664 5075 struct io_ring_ctx *ctx = req->ctx;
fcb323cc 5076
f86cd20c
JA
5077 if (req->work.files)
5078 return 0;
b14cca0c 5079 if (!ctx->ring_file)
b5dba59e
JA
5080 return -EBADF;
5081
fcb323cc
JA
5082 rcu_read_lock();
5083 spin_lock_irq(&ctx->inflight_lock);
5084 /*
5085 * We use the f_ops->flush() handler to ensure that we can flush
5086 * out work accessing these files if the fd is closed. Check if
5087 * the fd has changed since we started down this path, and disallow
5088 * this operation if it has.
5089 */
b14cca0c 5090 if (fcheck(ctx->ring_fd) == ctx->ring_file) {
fcb323cc
JA
5091 list_add(&req->inflight_entry, &ctx->inflight_list);
5092 req->flags |= REQ_F_INFLIGHT;
5093 req->work.files = current->files;
5094 ret = 0;
5095 }
5096 spin_unlock_irq(&ctx->inflight_lock);
5097 rcu_read_unlock();
5098
5099 return ret;
5100}
5101
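/*
 * A linked timeout fired: locate the request it was linked behind and,
 * if that request is still around, cancel it with -ETIME. Otherwise the
 * linked work already completed, and only the timeout itself is
 * completed with -ETIME.
 */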
2665abfd 5102static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2b188cc1 5103{
ad8a48ac
JA
5104 struct io_timeout_data *data = container_of(timer,
5105 struct io_timeout_data, timer);
5106 struct io_kiocb *req = data->req;
2665abfd
JA
5107 struct io_ring_ctx *ctx = req->ctx;
5108 struct io_kiocb *prev = NULL;
5109 unsigned long flags;
2665abfd
JA
5110
5111 spin_lock_irqsave(&ctx->completion_lock, flags);
5112
5113 /*
5114	 * We don't expect the list to be empty; that will only happen if we
5115 * race with the completion of the linked work.
5116 */
4493233e
PB
5117 if (!list_empty(&req->link_list)) {
5118 prev = list_entry(req->link_list.prev, struct io_kiocb,
5119 link_list);
5d960724 5120 if (refcount_inc_not_zero(&prev->refs)) {
4493233e 5121 list_del_init(&req->link_list);
5d960724
JA
5122 prev->flags &= ~REQ_F_LINK_TIMEOUT;
5123 } else
76a46e06 5124 prev = NULL;
2665abfd
JA
5125 }
5126
5127 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5128
5129 if (prev) {
4e88d6e7 5130 req_set_fail_links(prev);
014db007 5131 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
76a46e06 5132 io_put_req(prev);
47f46768
JA
5133 } else {
5134 io_cqring_add_event(req, -ETIME);
5135 io_put_req(req);
2665abfd 5136 }
2665abfd
JA
5137 return HRTIMER_NORESTART;
5138}
5139
ad8a48ac 5140static void io_queue_linked_timeout(struct io_kiocb *req)
2665abfd 5141{
76a46e06 5142 struct io_ring_ctx *ctx = req->ctx;
2665abfd 5143
76a46e06
JA
5144 /*
5145 * If the list is now empty, then our linked request finished before
5146	 * we got a chance to set up the timer.
5147 */
5148 spin_lock_irq(&ctx->completion_lock);
4493233e 5149 if (!list_empty(&req->link_list)) {
2d28390a 5150 struct io_timeout_data *data = &req->io->timeout;
94ae5e77 5151
ad8a48ac
JA
5152 data->timer.function = io_link_timeout_fn;
5153 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
5154 data->mode);
2665abfd 5155 }
76a46e06 5156 spin_unlock_irq(&ctx->completion_lock);
2665abfd 5157
2665abfd 5158 /* drop submission reference */
76a46e06
JA
5159 io_put_req(req);
5160}
2665abfd 5161
ad8a48ac 5162static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
2665abfd
JA
5163{
5164 struct io_kiocb *nxt;
5165
5166 if (!(req->flags & REQ_F_LINK))
5167 return NULL;
d7718a9d
JA
5168 /* for polled retry, if flag is set, we already went through here */
5169 if (req->flags & REQ_F_POLLED)
5170 return NULL;
2665abfd 5171
4493233e
PB
5172 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
5173 link_list);
d625c6ee 5174 if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
76a46e06 5175 return NULL;
2665abfd 5176
76a46e06 5177 req->flags |= REQ_F_LINK_TIMEOUT;
76a46e06 5178 return nxt;
2665abfd
JA
5179}
5180
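/*
 * Inline submission path: attempt a non-blocking issue first. On
 * -EAGAIN, either arm the internal poll handler or punt to the async
 * workqueue, taking care of linked timeouts and any per-request
 * personality credentials along the way.
 */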
3529d8c2 5181static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2b188cc1 5182{
4a0a7a18 5183 struct io_kiocb *linked_timeout;
4bc4494e 5184 struct io_kiocb *nxt;
193155c8 5185 const struct cred *old_creds = NULL;
e0c5c576 5186 int ret;
2b188cc1 5187
4a0a7a18
JA
5188again:
5189 linked_timeout = io_prep_linked_timeout(req);
5190
193155c8
JA
5191 if (req->work.creds && req->work.creds != current_cred()) {
5192 if (old_creds)
5193 revert_creds(old_creds);
5194 if (old_creds == req->work.creds)
5195 old_creds = NULL; /* restored original creds */
5196 else
5197 old_creds = override_creds(req->work.creds);
5198 }
5199
014db007 5200 ret = io_issue_sqe(req, sqe, true);
491381ce
JA
5201
5202 /*
5203 * We async punt it if the file wasn't marked NOWAIT, or if the file
5204 * doesn't support non-blocking read/write attempts
5205 */
5206 if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
5207 (req->flags & REQ_F_MUST_PUNT))) {
d7718a9d
JA
5208 if (io_arm_poll_handler(req)) {
5209 if (linked_timeout)
5210 io_queue_linked_timeout(linked_timeout);
4bc4494e 5211 goto exit;
d7718a9d 5212 }
86a761f8 5213punt:
f86cd20c 5214 if (io_op_defs[req->opcode].file_table) {
bbad27b2
PB
5215 ret = io_grab_files(req);
5216 if (ret)
5217 goto err;
2b188cc1 5218 }
bbad27b2
PB
5219
5220 /*
5221 * Queued up for async execution, worker will release
5222 * submit reference when the iocb is actually submitted.
5223 */
5224 io_queue_async_work(req);
4bc4494e 5225 goto exit;
2b188cc1 5226 }
e65ef56d 5227
fcb323cc 5228err:
4bc4494e 5229 nxt = NULL;
76a46e06 5230 /* drop submission reference */
2a44f467 5231 io_put_req_find_next(req, &nxt);
e65ef56d 5232
f9bd67f6 5233 if (linked_timeout) {
76a46e06 5234 if (!ret)
f9bd67f6 5235 io_queue_linked_timeout(linked_timeout);
76a46e06 5236 else
f9bd67f6 5237 io_put_req(linked_timeout);
76a46e06
JA
5238 }
5239
e65ef56d 5240 /* and drop final reference, if we failed */
9e645e11 5241 if (ret) {
78e19bbe 5242 io_cqring_add_event(req, ret);
4e88d6e7 5243 req_set_fail_links(req);
e65ef56d 5244 io_put_req(req);
9e645e11 5245 }
4a0a7a18
JA
5246 if (nxt) {
5247 req = nxt;
86a761f8
PB
5248
5249 if (req->flags & REQ_F_FORCE_ASYNC)
5250 goto punt;
4a0a7a18
JA
5251 goto again;
5252 }
4bc4494e 5253exit:
193155c8
JA
5254 if (old_creds)
5255 revert_creds(old_creds);
2b188cc1
JA
5256}
5257
3529d8c2 5258static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4fe2c963
JL
5259{
5260 int ret;
5261
3529d8c2 5262 ret = io_req_defer(req, sqe);
4fe2c963
JL
5263 if (ret) {
5264 if (ret != -EIOCBQUEUED) {
1118591a 5265fail_req:
78e19bbe 5266 io_cqring_add_event(req, ret);
4e88d6e7 5267 req_set_fail_links(req);
78e19bbe 5268 io_double_put_req(req);
4fe2c963 5269 }
2550878f 5270 } else if (req->flags & REQ_F_FORCE_ASYNC) {
1118591a
PB
5271 ret = io_req_defer_prep(req, sqe);
5272 if (unlikely(ret < 0))
5273 goto fail_req;
ce35a47a
JA
5274 /*
5275	 * Never try inline submit if IOSQE_ASYNC is set, go straight
5276 * to async execution.
5277 */
5278 req->work.flags |= IO_WQ_WORK_CONCURRENT;
5279 io_queue_async_work(req);
5280 } else {
3529d8c2 5281 __io_queue_sqe(req, sqe);
ce35a47a 5282 }
4fe2c963
JL
5283}
5284
1b4a51b6 5285static inline void io_queue_link_head(struct io_kiocb *req)
4fe2c963 5286{
94ae5e77 5287 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
1b4a51b6
PB
5288 io_cqring_add_event(req, -ECANCELED);
5289 io_double_put_req(req);
5290 } else
3529d8c2 5291 io_queue_sqe(req, NULL);
4fe2c963
JL
5292}
5293
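/* the sqe->flags bits the kernel accepts; unknown bits fail with -EINVAL */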
4e88d6e7 5294#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
bcda7baa
JA
5295 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
5296 IOSQE_BUFFER_SELECT)
9e645e11 5297
3529d8c2
JA
5298static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5299 struct io_submit_state *state, struct io_kiocb **link)
9e645e11 5300{
a197f664 5301 struct io_ring_ctx *ctx = req->ctx;
32fe525b 5302 unsigned int sqe_flags;
75c6a039 5303 int ret, id;
9e645e11 5304
32fe525b 5305 sqe_flags = READ_ONCE(sqe->flags);
9e645e11
JA
5306
5307 /* enforce forwards compatibility on users */
32fe525b 5308 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
9e645e11 5309 ret = -EINVAL;
196be95c 5310 goto err_req;
9e645e11
JA
5311 }
5312
bcda7baa
JA
5313 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
5314 !io_op_defs[req->opcode].buffer_select) {
5315 ret = -EOPNOTSUPP;
5316 goto err_req;
5317 }
5318
75c6a039
JA
5319 id = READ_ONCE(sqe->personality);
5320 if (id) {
193155c8
JA
5321 req->work.creds = idr_find(&ctx->personality_idr, id);
5322 if (unlikely(!req->work.creds)) {
75c6a039
JA
5323 ret = -EINVAL;
5324 goto err_req;
5325 }
193155c8 5326 get_cred(req->work.creds);
75c6a039
JA
5327 }
5328
6b47ee6e 5329 /* same numerical values with corresponding REQ_F_*, safe to copy */
8da11c19 5330 req->flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK |
bcda7baa
JA
5331 IOSQE_ASYNC | IOSQE_FIXED_FILE |
5332 IOSQE_BUFFER_SELECT);
9e645e11 5333
3529d8c2 5334 ret = io_req_set_file(state, req, sqe);
9e645e11
JA
5335 if (unlikely(ret)) {
5336err_req:
78e19bbe
JA
5337 io_cqring_add_event(req, ret);
5338 io_double_put_req(req);
2e6e1fde 5339 return false;
9e645e11
JA
5340 }
5341
9e645e11
JA
5342 /*
5343 * If we already have a head request, queue this one for async
5344 * submittal once the head completes. If we don't have a head but
5345 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
5346 * submitted sync once the chain is complete. If none of those
5347 * conditions are true (normal request), then just queue it.
5348 */
5349 if (*link) {
9d76377f 5350 struct io_kiocb *head = *link;
4e88d6e7 5351
8cdf2193
PB
5352 /*
5353 * Taking sequential execution of a link, draining both sides
5354	 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
5355 * requests in the link. So, it drains the head and the
5356 * next after the link request. The last one is done via
5357 * drain_next flag to persist the effect across calls.
5358 */
711be031
PB
5359 if (sqe_flags & IOSQE_IO_DRAIN) {
5360 head->flags |= REQ_F_IO_DRAIN;
5361 ctx->drain_next = 1;
5362 }
b7bb4f7d 5363 if (io_alloc_async_ctx(req)) {
9e645e11
JA
5364 ret = -EAGAIN;
5365 goto err_req;
5366 }
5367
3529d8c2 5368 ret = io_req_defer_prep(req, sqe);
2d28390a 5369 if (ret) {
4e88d6e7 5370 /* fail even hard links since we don't submit */
9d76377f 5371 head->flags |= REQ_F_FAIL_LINK;
f67676d1 5372 goto err_req;
2d28390a 5373 }
9d76377f
PB
5374 trace_io_uring_link(ctx, req, head);
5375 list_add_tail(&req->link_list, &head->link_list);
32fe525b
PB
5376
5377 /* last request of a link, enqueue the link */
5378 if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) {
5379 io_queue_link_head(head);
5380 *link = NULL;
5381 }
9e645e11 5382 } else {
711be031
PB
5383 if (unlikely(ctx->drain_next)) {
5384 req->flags |= REQ_F_IO_DRAIN;
5385 req->ctx->drain_next = 0;
5386 }
5387 if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
5388 req->flags |= REQ_F_LINK;
711be031
PB
5389 INIT_LIST_HEAD(&req->link_list);
5390 ret = io_req_defer_prep(req, sqe);
5391 if (ret)
5392 req->flags |= REQ_F_FAIL_LINK;
5393 *link = req;
5394 } else {
5395 io_queue_sqe(req, sqe);
5396 }
9e645e11 5397 }
2e6e1fde
PB
5398
5399 return true;
9e645e11
JA
5400}
5401
9a56a232
JA
5402/*
5403 * Batched submission is done, ensure local IO is flushed out.
5404 */
5405static void io_submit_state_end(struct io_submit_state *state)
5406{
5407 blk_finish_plug(&state->plug);
3d6770fb 5408 io_file_put(state);
2579f913 5409 if (state->free_reqs)
6c8a3134 5410 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
9a56a232
JA
5411}
5412
5413/*
5414 * Start submission side cache.
5415 */
5416static void io_submit_state_start(struct io_submit_state *state,
22efde59 5417 unsigned int max_ios)
9a56a232
JA
5418{
5419 blk_start_plug(&state->plug);
2579f913 5420 state->free_reqs = 0;
9a56a232
JA
5421 state->file = NULL;
5422 state->ios_left = max_ios;
5423}
5424
2b188cc1
JA
5425static void io_commit_sqring(struct io_ring_ctx *ctx)
5426{
75b28aff 5427 struct io_rings *rings = ctx->rings;
2b188cc1 5428
caf582c6
PB
5429 /*
5430 * Ensure any loads from the SQEs are done at this point,
5431 * since once we write the new head, the application could
5432 * write new data to them.
5433 */
5434 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
5435}
5436
2b188cc1 5437/*
3529d8c2 5438 * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
2b188cc1
JA
5439 * that is mapped by userspace. This means that care needs to be taken to
5440 * ensure that reads are stable, as we cannot rely on userspace always
5441 * being a good citizen. If members of the sqe are validated and then later
5442 * used, it's important that those reads are done through READ_ONCE() to
5443 * prevent a re-load down the line.
5444 */
3529d8c2
JA
5445static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
5446 const struct io_uring_sqe **sqe_ptr)
2b188cc1 5447{
75b28aff 5448 u32 *sq_array = ctx->sq_array;
2b188cc1
JA
5449 unsigned head;
5450
5451 /*
5452 * The cached sq head (or cq tail) serves two purposes:
5453 *
5454 * 1) allows us to batch the cost of updating the user visible
5455 * head updates.
5456 * 2) allows the kernel side to track the head on its own, even
5457 * though the application is the one updating it.
5458 */
ee7d46d9 5459 head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
9835d6fa 5460 if (likely(head < ctx->sq_entries)) {
cf6fd4bd
PB
5461 /*
5462	 * All IO needs to record the previous position; with LINK vs DRAIN,
5463 * it can be used to mark the position of the first IO in the
5464 * link list.
5465 */
5466 req->sequence = ctx->cached_sq_head;
3529d8c2
JA
5467 *sqe_ptr = &ctx->sq_sqes[head];
5468 req->opcode = READ_ONCE((*sqe_ptr)->opcode);
5469 req->user_data = READ_ONCE((*sqe_ptr)->user_data);
2b188cc1
JA
5470 ctx->cached_sq_head++;
5471 return true;
5472 }
5473
5474 /* drop invalid entries */
5475 ctx->cached_sq_head++;
498ccd9e 5476 ctx->cached_sq_dropped++;
ee7d46d9 5477 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
2b188cc1
JA
5478 return false;
5479}
5480
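/*
 * Submit up to @nr SQEs: flush any CQ overflow backlog first, clamp nr
 * to what the SQ ring actually holds, take the ctx references in bulk,
 * then fetch, prep, and issue each request. The SQ ring head is
 * committed once, after the whole batch has been consumed.
 */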
fb5ccc98 5481static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
ae9428ca
PB
5482 struct file *ring_file, int ring_fd,
5483 struct mm_struct **mm, bool async)
6c271ce2
JA
5484{
5485 struct io_submit_state state, *statep = NULL;
9e645e11 5486 struct io_kiocb *link = NULL;
9e645e11 5487 int i, submitted = 0;
95a1b3ff 5488 bool mm_fault = false;
6c271ce2 5489
c4a2ed72 5490 /* if we have a backlog and couldn't flush it all, return BUSY */
ad3eb2c8
JA
5491 if (test_bit(0, &ctx->sq_check_overflow)) {
5492 if (!list_empty(&ctx->cq_overflow_list) &&
5493 !io_cqring_overflow_flush(ctx, false))
5494 return -EBUSY;
5495 }
6c271ce2 5496
ee7d46d9
PB
5497 /* make sure SQ entry isn't read before tail */
5498 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
9ef4f124 5499
2b85edfc
PB
5500 if (!percpu_ref_tryget_many(&ctx->refs, nr))
5501 return -EAGAIN;
6c271ce2
JA
5502
5503 if (nr > IO_PLUG_THRESHOLD) {
22efde59 5504 io_submit_state_start(&state, nr);
6c271ce2
JA
5505 statep = &state;
5506 }
5507
b14cca0c
PB
5508 ctx->ring_fd = ring_fd;
5509 ctx->ring_file = ring_file;
5510
6c271ce2 5511 for (i = 0; i < nr; i++) {
3529d8c2 5512 const struct io_uring_sqe *sqe;
196be95c 5513 struct io_kiocb *req;
1cb1edb2 5514 int err;
fb5ccc98 5515
196be95c
PB
5516 req = io_get_req(ctx, statep);
5517 if (unlikely(!req)) {
5518 if (!submitted)
5519 submitted = -EAGAIN;
fb5ccc98 5520 break;
196be95c 5521 }
3529d8c2 5522 if (!io_get_sqring(ctx, req, &sqe)) {
2b85edfc 5523 __io_req_do_free(req);
196be95c
PB
5524 break;
5525 }
fb5ccc98 5526
d3656344
JA
5527 /* will complete beyond this point, count as submitted */
5528 submitted++;
5529
5530 if (unlikely(req->opcode >= IORING_OP_LAST)) {
1cb1edb2
PB
5531 err = -EINVAL;
5532fail_req:
5533 io_cqring_add_event(req, err);
d3656344 5534 io_double_put_req(req);
196be95c
PB
5535 break;
5536 }
fb5ccc98 5537
d3656344 5538 if (io_op_defs[req->opcode].needs_mm && !*mm) {
95a1b3ff 5539 mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
1cb1edb2
PB
5540 if (unlikely(mm_fault)) {
5541 err = -EFAULT;
5542 goto fail_req;
95a1b3ff 5543 }
1cb1edb2
PB
5544 use_mm(ctx->sqo_mm);
5545 *mm = ctx->sqo_mm;
9e645e11 5546 }
9e645e11 5547
cf6fd4bd 5548 req->needs_fixed_file = async;
354420f7
JA
5549 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
5550 true, async);
3529d8c2 5551 if (!io_submit_sqe(req, sqe, statep, &link))
2e6e1fde 5552 break;
6c271ce2
JA
5553 }
5554
9466f437
PB
5555 if (unlikely(submitted != nr)) {
5556 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
5557
5558 percpu_ref_put_many(&ctx->refs, nr - ref_used);
5559 }
9e645e11 5560 if (link)
1b4a51b6 5561 io_queue_link_head(link);
6c271ce2
JA
5562 if (statep)
5563 io_submit_state_end(&state);
5564
ae9428ca
PB
5565 /* Commit SQ ring head once we've consumed and submitted all SQEs */
5566 io_commit_sqring(ctx);
5567
6c271ce2
JA
5568 return submitted;
5569}
5570
5571static int io_sq_thread(void *data)
5572{
6c271ce2
JA
5573 struct io_ring_ctx *ctx = data;
5574 struct mm_struct *cur_mm = NULL;
181e448d 5575 const struct cred *old_cred;
6c271ce2
JA
5576 mm_segment_t old_fs;
5577 DEFINE_WAIT(wait);
6c271ce2 5578 unsigned long timeout;
bdcd3eab 5579 int ret = 0;
6c271ce2 5580
206aefde 5581 complete(&ctx->completions[1]);
a4c0b3de 5582
6c271ce2
JA
5583 old_fs = get_fs();
5584 set_fs(USER_DS);
181e448d 5585 old_cred = override_creds(ctx->creds);
6c271ce2 5586
bdcd3eab 5587 timeout = jiffies + ctx->sq_thread_idle;
2bbcd6d3 5588 while (!kthread_should_park()) {
fb5ccc98 5589 unsigned int to_submit;
6c271ce2 5590
bdcd3eab 5591 if (!list_empty(&ctx->poll_list)) {
6c271ce2
JA
5592 unsigned nr_events = 0;
5593
bdcd3eab
XW
5594 mutex_lock(&ctx->uring_lock);
5595 if (!list_empty(&ctx->poll_list))
5596 io_iopoll_getevents(ctx, &nr_events, 0);
5597 else
6c271ce2 5598 timeout = jiffies + ctx->sq_thread_idle;
bdcd3eab 5599 mutex_unlock(&ctx->uring_lock);
6c271ce2
JA
5600 }
5601
fb5ccc98 5602 to_submit = io_sqring_entries(ctx);
c1edbf5f
JA
5603
5604 /*
5605 * If submit got -EBUSY, flag us as needing the application
5606 * to enter the kernel to reap and flush events.
5607 */
5608 if (!to_submit || ret == -EBUSY) {
7143b5ac
SG
5609 /*
5610 * Drop cur_mm before scheduling, we can't hold it for
5611 * long periods (or over schedule()). Do this before
5612 * adding ourselves to the waitqueue, as the unuse/drop
5613 * may sleep.
5614 */
5615 if (cur_mm) {
5616 unuse_mm(cur_mm);
5617 mmput(cur_mm);
5618 cur_mm = NULL;
5619 }
5620
6c271ce2
JA
5621 /*
5622 * We're polling. If we're within the defined idle
5623 * period, then let us spin without work before going
c1edbf5f
JA
 5624 * to sleep. The exception is if we got -EBUSY submitting
 5625 * more IO; in that case, wait for the application to
 5626 * reap events and wake us up.
6c271ce2 5627 */
bdcd3eab 5628 if (!list_empty(&ctx->poll_list) ||
df069d80
JA
5629 (!time_after(jiffies, timeout) && ret != -EBUSY &&
5630 !percpu_ref_is_dying(&ctx->refs))) {
b41e9852
JA
5631 if (current->task_works)
5632 task_work_run();
9831a90c 5633 cond_resched();
6c271ce2
JA
5634 continue;
5635 }
5636
6c271ce2
JA
5637 prepare_to_wait(&ctx->sqo_wait, &wait,
5638 TASK_INTERRUPTIBLE);
5639
bdcd3eab
XW
5640 /*
 5641 * While doing polled IO, before going to sleep we need to
 5642 * check if there are new reqs added to the poll_list; reqs
 5643 * may have been punted to an io worker and only added to
 5644 * the poll_list later, hence check the poll_list
 5645 * again.
5646 */
5647 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
5648 !list_empty_careful(&ctx->poll_list)) {
5649 finish_wait(&ctx->sqo_wait, &wait);
5650 continue;
5651 }
5652
6c271ce2 5653 /* Tell userspace we may need a wakeup call */
75b28aff 5654 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
0d7bae69
SB
5655 /* make sure to read SQ tail after writing flags */
5656 smp_mb();
6c271ce2 5657
fb5ccc98 5658 to_submit = io_sqring_entries(ctx);
c1edbf5f 5659 if (!to_submit || ret == -EBUSY) {
2bbcd6d3 5660 if (kthread_should_park()) {
6c271ce2
JA
5661 finish_wait(&ctx->sqo_wait, &wait);
5662 break;
5663 }
b41e9852
JA
5664 if (current->task_works) {
5665 task_work_run();
5666 continue;
5667 }
6c271ce2
JA
5668 if (signal_pending(current))
5669 flush_signals(current);
5670 schedule();
5671 finish_wait(&ctx->sqo_wait, &wait);
5672
75b28aff 5673 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6c271ce2
JA
5674 continue;
5675 }
5676 finish_wait(&ctx->sqo_wait, &wait);
5677
75b28aff 5678 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6c271ce2
JA
5679 }
5680
8a4955ff 5681 mutex_lock(&ctx->uring_lock);
1d7bb1d5 5682 ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
8a4955ff 5683 mutex_unlock(&ctx->uring_lock);
bdcd3eab 5684 timeout = jiffies + ctx->sq_thread_idle;
6c271ce2
JA
5685 }
5686
b41e9852
JA
5687 if (current->task_works)
5688 task_work_run();
5689
6c271ce2
JA
5690 set_fs(old_fs);
5691 if (cur_mm) {
5692 unuse_mm(cur_mm);
5693 mmput(cur_mm);
5694 }
181e448d 5695 revert_creds(old_cred);
06058632 5696
2bbcd6d3 5697 kthread_parkme();
06058632 5698
6c271ce2
JA
5699 return 0;
5700}
5701
bda52162
JA
5702struct io_wait_queue {
5703 struct wait_queue_entry wq;
5704 struct io_ring_ctx *ctx;
5705 unsigned to_wait;
5706 unsigned nr_timeouts;
5707};
5708
1d7bb1d5 5709static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
bda52162
JA
5710{
5711 struct io_ring_ctx *ctx = iowq->ctx;
5712
5713 /*
d195a66e 5714 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
5715 * started waiting. For timeouts, we always want to return to userspace,
5716 * regardless of event count.
5717 */
1d7bb1d5 5718 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
bda52162
JA
5719 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
5720}
5721
5722static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
5723 int wake_flags, void *key)
5724{
5725 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
5726 wq);
5727
1d7bb1d5
JA
5728 /* use noflush == true, as we can't safely rely on locking context */
5729 if (!io_should_wake(iowq, true))
bda52162
JA
5730 return -1;
5731
5732 return autoremove_wake_function(curr, mode, wake_flags, key);
5733}
5734
2b188cc1
JA
5735/*
5736 * Wait until events become available, if we don't already have some. The
5737 * application must reap them itself, as they reside on the shared cq ring.
5738 */
5739static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
5740 const sigset_t __user *sig, size_t sigsz)
5741{
bda52162
JA
5742 struct io_wait_queue iowq = {
5743 .wq = {
5744 .private = current,
5745 .func = io_wake_function,
5746 .entry = LIST_HEAD_INIT(iowq.wq.entry),
5747 },
5748 .ctx = ctx,
5749 .to_wait = min_events,
5750 };
75b28aff 5751 struct io_rings *rings = ctx->rings;
e9ffa5c2 5752 int ret = 0;
2b188cc1 5753
b41e9852
JA
5754 do {
5755 if (io_cqring_events(ctx, false) >= min_events)
5756 return 0;
5757 if (!current->task_works)
5758 break;
5759 task_work_run();
5760 } while (1);
2b188cc1
JA
5761
5762 if (sig) {
9e75ad5d
AB
5763#ifdef CONFIG_COMPAT
5764 if (in_compat_syscall())
5765 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 5766 sigsz);
9e75ad5d
AB
5767 else
5768#endif
b772434b 5769 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 5770
2b188cc1
JA
5771 if (ret)
5772 return ret;
5773 }
5774
bda52162 5775 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
c826bd7a 5776 trace_io_uring_cqring_wait(ctx, min_events);
bda52162
JA
5777 do {
5778 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
5779 TASK_INTERRUPTIBLE);
b41e9852
JA
5780 if (current->task_works)
5781 task_work_run();
1d7bb1d5 5782 if (io_should_wake(&iowq, false))
bda52162
JA
5783 break;
5784 schedule();
5785 if (signal_pending(current)) {
e9ffa5c2 5786 ret = -EINTR;
bda52162
JA
5787 break;
5788 }
5789 } while (1);
5790 finish_wait(&ctx->wait, &iowq.wq);
5791
e9ffa5c2 5792 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 5793
75b28aff 5794 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
5795}
5796
6b06314c
JA
5797static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
5798{
5799#if defined(CONFIG_UNIX)
5800 if (ctx->ring_sock) {
5801 struct sock *sock = ctx->ring_sock->sk;
5802 struct sk_buff *skb;
5803
5804 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
5805 kfree_skb(skb);
5806 }
5807#else
5808 int i;
5809
65e19f54
JA
5810 for (i = 0; i < ctx->nr_user_files; i++) {
5811 struct file *file;
5812
5813 file = io_file_from_index(ctx, i);
5814 if (file)
5815 fput(file);
5816 }
6b06314c
JA
5817#endif
5818}
5819
05f3fb3c
JA
5820static void io_file_ref_kill(struct percpu_ref *ref)
5821{
5822 struct fixed_file_data *data;
5823
5824 data = container_of(ref, struct fixed_file_data, refs);
5825 complete(&data->done);
5826}
5827
6b06314c
JA
5828static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
5829{
05f3fb3c 5830 struct fixed_file_data *data = ctx->file_data;
65e19f54
JA
5831 unsigned nr_tables, i;
5832
05f3fb3c 5833 if (!data)
6b06314c
JA
5834 return -ENXIO;
5835
05f3fb3c 5836 percpu_ref_kill_and_confirm(&data->refs, io_file_ref_kill);
e46a7950 5837 flush_work(&data->ref_work);
2faf852d
JA
5838 wait_for_completion(&data->done);
5839 io_ring_file_ref_flush(data);
05f3fb3c
JA
5840 percpu_ref_exit(&data->refs);
5841
6b06314c 5842 __io_sqe_files_unregister(ctx);
65e19f54
JA
5843 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
5844 for (i = 0; i < nr_tables; i++)
05f3fb3c
JA
5845 kfree(data->table[i].files);
5846 kfree(data->table);
5847 kfree(data);
5848 ctx->file_data = NULL;
6b06314c
JA
5849 ctx->nr_user_files = 0;
5850 return 0;
5851}
5852
6c271ce2
JA
5853static void io_sq_thread_stop(struct io_ring_ctx *ctx)
5854{
5855 if (ctx->sqo_thread) {
206aefde 5856 wait_for_completion(&ctx->completions[1]);
2bbcd6d3
RP
5857 /*
5858 * The park is a bit of a work-around, without it we get
5859 * warning spews on shutdown with SQPOLL set and affinity
5860 * set to a single CPU.
5861 */
06058632 5862 kthread_park(ctx->sqo_thread);
6c271ce2
JA
5863 kthread_stop(ctx->sqo_thread);
5864 ctx->sqo_thread = NULL;
5865 }
5866}
5867
6b06314c
JA
5868static void io_finish_async(struct io_ring_ctx *ctx)
5869{
6c271ce2
JA
5870 io_sq_thread_stop(ctx);
5871
561fb04a
JA
5872 if (ctx->io_wq) {
5873 io_wq_destroy(ctx->io_wq);
5874 ctx->io_wq = NULL;
6b06314c
JA
5875 }
5876}
5877
5878#if defined(CONFIG_UNIX)
6b06314c
JA
5879/*
5880 * Ensure the UNIX gc is aware of our file set, so we are certain that
5881 * the io_uring can be safely unregistered on process exit, even if we have
5882 * loops in the file referencing.
5883 */
5884static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
5885{
5886 struct sock *sk = ctx->ring_sock->sk;
5887 struct scm_fp_list *fpl;
5888 struct sk_buff *skb;
08a45173 5889 int i, nr_files;
6b06314c
JA
5890
5891 if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
5892 unsigned long inflight = ctx->user->unix_inflight + nr;
5893
5894 if (inflight > task_rlimit(current, RLIMIT_NOFILE))
5895 return -EMFILE;
5896 }
5897
5898 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
5899 if (!fpl)
5900 return -ENOMEM;
5901
5902 skb = alloc_skb(0, GFP_KERNEL);
5903 if (!skb) {
5904 kfree(fpl);
5905 return -ENOMEM;
5906 }
5907
5908 skb->sk = sk;
6b06314c 5909
08a45173 5910 nr_files = 0;
6b06314c
JA
5911 fpl->user = get_uid(ctx->user);
5912 for (i = 0; i < nr; i++) {
65e19f54
JA
5913 struct file *file = io_file_from_index(ctx, i + offset);
5914
5915 if (!file)
08a45173 5916 continue;
65e19f54 5917 fpl->fp[nr_files] = get_file(file);
08a45173
JA
5918 unix_inflight(fpl->user, fpl->fp[nr_files]);
5919 nr_files++;
6b06314c
JA
5920 }
5921
08a45173
JA
5922 if (nr_files) {
5923 fpl->max = SCM_MAX_FD;
5924 fpl->count = nr_files;
5925 UNIXCB(skb).fp = fpl;
05f3fb3c 5926 skb->destructor = unix_destruct_scm;
08a45173
JA
5927 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
5928 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 5929
08a45173
JA
5930 for (i = 0; i < nr_files; i++)
5931 fput(fpl->fp[i]);
5932 } else {
5933 kfree_skb(skb);
5934 kfree(fpl);
5935 }
6b06314c
JA
5936
5937 return 0;
5938}
5939
5940/*
5941 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
5942 * causes regular reference counting to break down. We rely on the UNIX
5943 * garbage collection to take care of this problem for us.
5944 */
5945static int io_sqe_files_scm(struct io_ring_ctx *ctx)
5946{
5947 unsigned left, total;
5948 int ret = 0;
5949
5950 total = 0;
5951 left = ctx->nr_user_files;
5952 while (left) {
5953 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
5954
5955 ret = __io_sqe_files_scm(ctx, this_files, total);
5956 if (ret)
5957 break;
5958 left -= this_files;
5959 total += this_files;
5960 }
5961
5962 if (!ret)
5963 return 0;
5964
5965 while (total < ctx->nr_user_files) {
65e19f54
JA
5966 struct file *file = io_file_from_index(ctx, total);
5967
5968 if (file)
5969 fput(file);
6b06314c
JA
5970 total++;
5971 }
5972
5973 return ret;
5974}
5975#else
5976static int io_sqe_files_scm(struct io_ring_ctx *ctx)
5977{
5978 return 0;
5979}
5980#endif
5981
65e19f54
JA
5982static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
5983 unsigned nr_files)
5984{
5985 int i;
5986
5987 for (i = 0; i < nr_tables; i++) {
05f3fb3c 5988 struct fixed_file_table *table = &ctx->file_data->table[i];
65e19f54
JA
5989 unsigned this_files;
5990
5991 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
5992 table->files = kcalloc(this_files, sizeof(struct file *),
5993 GFP_KERNEL);
5994 if (!table->files)
5995 break;
5996 nr_files -= this_files;
5997 }
5998
5999 if (i == nr_tables)
6000 return 0;
6001
6002 for (i = 0; i < nr_tables; i++) {
05f3fb3c 6003 struct fixed_file_table *table = &ctx->file_data->table[i];
65e19f54
JA
6004 kfree(table->files);
6005 }
6006 return 1;
6007}
6008
05f3fb3c
JA
6009static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
6010{
6011#if defined(CONFIG_UNIX)
6012 struct sock *sock = ctx->ring_sock->sk;
6013 struct sk_buff_head list, *head = &sock->sk_receive_queue;
6014 struct sk_buff *skb;
6015 int i;
6016
6017 __skb_queue_head_init(&list);
6018
6019 /*
6020 * Find the skb that holds this file in its SCM_RIGHTS. When found,
6021 * remove this entry and rearrange the file array.
6022 */
6023 skb = skb_dequeue(head);
6024 while (skb) {
6025 struct scm_fp_list *fp;
6026
6027 fp = UNIXCB(skb).fp;
6028 for (i = 0; i < fp->count; i++) {
6029 int left;
6030
6031 if (fp->fp[i] != file)
6032 continue;
6033
6034 unix_notinflight(fp->user, fp->fp[i]);
6035 left = fp->count - 1 - i;
6036 if (left) {
6037 memmove(&fp->fp[i], &fp->fp[i + 1],
6038 left * sizeof(struct file *));
6039 }
6040 fp->count--;
6041 if (!fp->count) {
6042 kfree_skb(skb);
6043 skb = NULL;
6044 } else {
6045 __skb_queue_tail(&list, skb);
6046 }
6047 fput(file);
6048 file = NULL;
6049 break;
6050 }
6051
6052 if (!file)
6053 break;
6054
6055 __skb_queue_tail(&list, skb);
6056
6057 skb = skb_dequeue(head);
6058 }
6059
6060 if (skb_peek(&list)) {
6061 spin_lock_irq(&head->lock);
6062 while ((skb = __skb_dequeue(&list)) != NULL)
6063 __skb_queue_tail(head, skb);
6064 spin_unlock_irq(&head->lock);
6065 }
6066#else
6067 fput(file);
6068#endif
6069}
6070
6071struct io_file_put {
6072 struct llist_node llist;
6073 struct file *file;
6074 struct completion *done;
6075};
6076
2faf852d 6077static void io_ring_file_ref_flush(struct fixed_file_data *data)
65e19f54 6078{
05f3fb3c 6079 struct io_file_put *pfile, *tmp;
05f3fb3c 6080 struct llist_node *node;
65e19f54 6081
05f3fb3c
JA
6082 while ((node = llist_del_all(&data->put_llist)) != NULL) {
6083 llist_for_each_entry_safe(pfile, tmp, node, llist) {
6084 io_ring_file_put(data->ctx, pfile->file);
6085 if (pfile->done)
6086 complete(pfile->done);
6087 else
6088 kfree(pfile);
6089 }
65e19f54 6090 }
2faf852d 6091}
65e19f54 6092
2faf852d
JA
6093static void io_ring_file_ref_switch(struct work_struct *work)
6094{
6095 struct fixed_file_data *data;
65e19f54 6096
2faf852d
JA
6097 data = container_of(work, struct fixed_file_data, ref_work);
6098 io_ring_file_ref_flush(data);
05f3fb3c
JA
6099 percpu_ref_switch_to_percpu(&data->refs);
6100}
65e19f54 6101
05f3fb3c
JA
6102static void io_file_data_ref_zero(struct percpu_ref *ref)
6103{
6104 struct fixed_file_data *data;
6105
6106 data = container_of(ref, struct fixed_file_data, refs);
6107
2faf852d
JA
6108 /*
6109 * We can't safely switch from inside this context, punt to wq. If
6110 * the table ref is going away, the table is being unregistered.
6111 * Don't queue up the async work for that case, the caller will
6112 * handle it.
6113 */
6114 if (!percpu_ref_is_dying(&data->refs))
6115 queue_work(system_wq, &data->ref_work);
65e19f54
JA
6116}
6117
6b06314c
JA
6118static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
6119 unsigned nr_args)
6120{
6121 __s32 __user *fds = (__s32 __user *) arg;
65e19f54 6122 unsigned nr_tables;
05f3fb3c 6123 struct file *file;
6b06314c
JA
6124 int fd, ret = 0;
6125 unsigned i;
6126
05f3fb3c 6127 if (ctx->file_data)
6b06314c
JA
6128 return -EBUSY;
6129 if (!nr_args)
6130 return -EINVAL;
6131 if (nr_args > IORING_MAX_FIXED_FILES)
6132 return -EMFILE;
6133
05f3fb3c
JA
6134 ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
6135 if (!ctx->file_data)
6136 return -ENOMEM;
6137 ctx->file_data->ctx = ctx;
6138 init_completion(&ctx->file_data->done);
6139
65e19f54 6140 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
05f3fb3c
JA
6141 ctx->file_data->table = kcalloc(nr_tables,
6142 sizeof(struct fixed_file_table),
65e19f54 6143 GFP_KERNEL);
05f3fb3c
JA
6144 if (!ctx->file_data->table) {
6145 kfree(ctx->file_data);
6146 ctx->file_data = NULL;
6b06314c 6147 return -ENOMEM;
05f3fb3c
JA
6148 }
6149
6150 if (percpu_ref_init(&ctx->file_data->refs, io_file_data_ref_zero,
6151 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
6152 kfree(ctx->file_data->table);
6153 kfree(ctx->file_data);
6154 ctx->file_data = NULL;
6b06314c 6155 return -ENOMEM;
05f3fb3c
JA
6156 }
6157 ctx->file_data->put_llist.first = NULL;
6158 INIT_WORK(&ctx->file_data->ref_work, io_ring_file_ref_switch);
6b06314c 6159
65e19f54 6160 if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
05f3fb3c
JA
6161 percpu_ref_exit(&ctx->file_data->refs);
6162 kfree(ctx->file_data->table);
6163 kfree(ctx->file_data);
6164 ctx->file_data = NULL;
65e19f54
JA
6165 return -ENOMEM;
6166 }
6167
08a45173 6168 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
65e19f54
JA
6169 struct fixed_file_table *table;
6170 unsigned index;
6171
6b06314c
JA
6172 ret = -EFAULT;
6173 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
6174 break;
08a45173
JA
6175 /* allow sparse sets */
6176 if (fd == -1) {
6177 ret = 0;
6178 continue;
6179 }
6b06314c 6180
05f3fb3c 6181 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54 6182 index = i & IORING_FILE_TABLE_MASK;
05f3fb3c 6183 file = fget(fd);
6b06314c
JA
6184
6185 ret = -EBADF;
05f3fb3c 6186 if (!file)
6b06314c 6187 break;
05f3fb3c 6188
6b06314c
JA
6189 /*
6190 * Don't allow io_uring instances to be registered. If UNIX
6191 * isn't enabled, then this causes a reference cycle and this
6192 * instance can never get freed. If UNIX is enabled we'll
6193 * handle it just fine, but there's still no point in allowing
6194 * a ring fd as it doesn't support regular read/write anyway.
6195 */
05f3fb3c
JA
6196 if (file->f_op == &io_uring_fops) {
6197 fput(file);
6b06314c
JA
6198 break;
6199 }
6b06314c 6200 ret = 0;
05f3fb3c 6201 table->files[index] = file;
6b06314c
JA
6202 }
6203
6204 if (ret) {
65e19f54 6205 for (i = 0; i < ctx->nr_user_files; i++) {
65e19f54
JA
6206 file = io_file_from_index(ctx, i);
6207 if (file)
6208 fput(file);
6209 }
6210 for (i = 0; i < nr_tables; i++)
05f3fb3c 6211 kfree(ctx->file_data->table[i].files);
6b06314c 6212
05f3fb3c
JA
6213 kfree(ctx->file_data->table);
6214 kfree(ctx->file_data);
6215 ctx->file_data = NULL;
6b06314c
JA
6216 ctx->nr_user_files = 0;
6217 return ret;
6218 }
6219
6220 ret = io_sqe_files_scm(ctx);
6221 if (ret)
6222 io_sqe_files_unregister(ctx);
6223
6224 return ret;
6225}
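
/*
 * Userspace sketch (illustrative only; 'ring_fd' and 'data_fd' are assumed
 * to exist): a fixed file set is registered through the io_uring_register(2)
 * syscall, e.g.
 *
 *	int fds[2] = { data_fd, -1 };	/* -1 leaves a sparse slot */
 *	io_uring_register(ring_fd, IORING_REGISTER_FILES, fds, 2);
 *
 * Registered slots are then referenced by index from an sqe that sets
 * IOSQE_FIXED_FILE.
 */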
6226
c3a31e60
JA
6227static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
6228 int index)
6229{
6230#if defined(CONFIG_UNIX)
6231 struct sock *sock = ctx->ring_sock->sk;
6232 struct sk_buff_head *head = &sock->sk_receive_queue;
6233 struct sk_buff *skb;
6234
6235 /*
6236 * See if we can merge this file into an existing skb SCM_RIGHTS
6237 * file set. If there's no room, fall back to allocating a new skb
6238 * and filling it in.
6239 */
6240 spin_lock_irq(&head->lock);
6241 skb = skb_peek(head);
6242 if (skb) {
6243 struct scm_fp_list *fpl = UNIXCB(skb).fp;
6244
6245 if (fpl->count < SCM_MAX_FD) {
6246 __skb_unlink(skb, head);
6247 spin_unlock_irq(&head->lock);
6248 fpl->fp[fpl->count] = get_file(file);
6249 unix_inflight(fpl->user, fpl->fp[fpl->count]);
6250 fpl->count++;
6251 spin_lock_irq(&head->lock);
6252 __skb_queue_head(head, skb);
6253 } else {
6254 skb = NULL;
6255 }
6256 }
6257 spin_unlock_irq(&head->lock);
6258
6259 if (skb) {
6260 fput(file);
6261 return 0;
6262 }
6263
6264 return __io_sqe_files_scm(ctx, 1, index);
6265#else
6266 return 0;
6267#endif
6268}
6269
05f3fb3c 6270static void io_atomic_switch(struct percpu_ref *ref)
c3a31e60 6271{
05f3fb3c
JA
6272 struct fixed_file_data *data;
6273
dd3db2a3
JA
6274 /*
6275 * Juggle reference to ensure we hit zero, if needed, so we can
6276 * switch back to percpu mode
6277 */
05f3fb3c 6278 data = container_of(ref, struct fixed_file_data, refs);
dd3db2a3
JA
6279 percpu_ref_put(&data->refs);
6280 percpu_ref_get(&data->refs);
05f3fb3c
JA
6281}
6282
6283static bool io_queue_file_removal(struct fixed_file_data *data,
6284 struct file *file)
6285{
6286 struct io_file_put *pfile, pfile_stack;
6287 DECLARE_COMPLETION_ONSTACK(done);
6288
6289 /*
 6290 * If we fail allocating the struct we need for doing async removal
6291 * of this file, just punt to sync and wait for it.
6292 */
6293 pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
6294 if (!pfile) {
6295 pfile = &pfile_stack;
6296 pfile->done = &done;
6297 }
6298
6299 pfile->file = file;
6300 llist_add(&pfile->llist, &data->put_llist);
6301
6302 if (pfile == &pfile_stack) {
dd3db2a3 6303 percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);
05f3fb3c
JA
6304 wait_for_completion(&done);
6305 flush_work(&data->ref_work);
6306 return false;
6307 }
6308
6309 return true;
6310}
6311
6312static int __io_sqe_files_update(struct io_ring_ctx *ctx,
6313 struct io_uring_files_update *up,
6314 unsigned nr_args)
6315{
6316 struct fixed_file_data *data = ctx->file_data;
6317 bool ref_switch = false;
6318 struct file *file;
c3a31e60
JA
6319 __s32 __user *fds;
6320 int fd, i, err;
6321 __u32 done;
6322
05f3fb3c 6323 if (check_add_overflow(up->offset, nr_args, &done))
c3a31e60
JA
6324 return -EOVERFLOW;
6325 if (done > ctx->nr_user_files)
6326 return -EINVAL;
6327
6328 done = 0;
05f3fb3c 6329 fds = u64_to_user_ptr(up->fds);
c3a31e60 6330 while (nr_args) {
65e19f54
JA
6331 struct fixed_file_table *table;
6332 unsigned index;
6333
c3a31e60
JA
6334 err = 0;
6335 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
6336 err = -EFAULT;
6337 break;
6338 }
05f3fb3c
JA
6339 i = array_index_nospec(up->offset, ctx->nr_user_files);
6340 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54
JA
6341 index = i & IORING_FILE_TABLE_MASK;
6342 if (table->files[index]) {
05f3fb3c 6343 file = io_file_from_index(ctx, index);
65e19f54 6344 table->files[index] = NULL;
05f3fb3c
JA
6345 if (io_queue_file_removal(data, file))
6346 ref_switch = true;
c3a31e60
JA
6347 }
6348 if (fd != -1) {
c3a31e60
JA
6349 file = fget(fd);
6350 if (!file) {
6351 err = -EBADF;
6352 break;
6353 }
6354 /*
6355 * Don't allow io_uring instances to be registered. If
6356 * UNIX isn't enabled, then this causes a reference
6357 * cycle and this instance can never get freed. If UNIX
6358 * is enabled we'll handle it just fine, but there's
6359 * still no point in allowing a ring fd as it doesn't
6360 * support regular read/write anyway.
6361 */
6362 if (file->f_op == &io_uring_fops) {
6363 fput(file);
6364 err = -EBADF;
6365 break;
6366 }
65e19f54 6367 table->files[index] = file;
c3a31e60
JA
6368 err = io_sqe_file_register(ctx, file, i);
6369 if (err)
6370 break;
6371 }
6372 nr_args--;
6373 done++;
05f3fb3c
JA
6374 up->offset++;
6375 }
6376
dd3db2a3 6377 if (ref_switch)
05f3fb3c 6378 percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);
c3a31e60
JA
6379
6380 return done ? done : err;
6381}
05f3fb3c
JA
6382static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
6383 unsigned nr_args)
6384{
6385 struct io_uring_files_update up;
6386
6387 if (!ctx->file_data)
6388 return -ENXIO;
6389 if (!nr_args)
6390 return -EINVAL;
6391 if (copy_from_user(&up, arg, sizeof(up)))
6392 return -EFAULT;
6393 if (up.resv)
6394 return -EINVAL;
6395
6396 return __io_sqe_files_update(ctx, &up, nr_args);
6397}
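
/*
 * Userspace sketch (illustrative only; 'ring_fd' and 'new_fd' are assumed):
 * replacing slot 3 of a registered file set, or clearing it with -1, goes
 * through IORING_REGISTER_FILES_UPDATE:
 *
 *	struct io_uring_files_update up = {
 *		.offset	= 3,
 *		.fds	= (unsigned long) &new_fd,
 *	};
 *	io_uring_register(ring_fd, IORING_REGISTER_FILES_UPDATE, &up, 1);
 */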
c3a31e60 6398
e9fd9396 6399static void io_free_work(struct io_wq_work *work)
7d723065
JA
6400{
6401 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6402
e9fd9396 6403 /* Consider that io_steal_work() relies on this ref */
7d723065
JA
6404 io_put_req(req);
6405}
6406
24369c2e
PB
6407static int io_init_wq_offload(struct io_ring_ctx *ctx,
6408 struct io_uring_params *p)
6409{
6410 struct io_wq_data data;
6411 struct fd f;
6412 struct io_ring_ctx *ctx_attach;
6413 unsigned int concurrency;
6414 int ret = 0;
6415
6416 data.user = ctx->user;
e9fd9396 6417 data.free_work = io_free_work;
24369c2e
PB
6418
6419 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
6420 /* Do QD, or 4 * CPUS, whatever is smallest */
6421 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
6422
6423 ctx->io_wq = io_wq_create(concurrency, &data);
6424 if (IS_ERR(ctx->io_wq)) {
6425 ret = PTR_ERR(ctx->io_wq);
6426 ctx->io_wq = NULL;
6427 }
6428 return ret;
6429 }
6430
6431 f = fdget(p->wq_fd);
6432 if (!f.file)
6433 return -EBADF;
6434
6435 if (f.file->f_op != &io_uring_fops) {
6436 ret = -EINVAL;
6437 goto out_fput;
6438 }
6439
6440 ctx_attach = f.file->private_data;
6441 /* @io_wq is protected by holding the fd */
6442 if (!io_wq_get(ctx_attach->io_wq, &data)) {
6443 ret = -EINVAL;
6444 goto out_fput;
6445 }
6446
6447 ctx->io_wq = ctx_attach->io_wq;
6448out_fput:
6449 fdput(f);
6450 return ret;
6451}
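
/*
 * Userspace sketch (illustrative only; 'existing_fd' is assumed): sharing
 * the async backend of an existing ring instead of creating a new io-wq:
 *
 *	struct io_uring_params p = {
 *		.flags	= IORING_SETUP_ATTACH_WQ,
 *		.wq_fd	= existing_fd,
 *	};
 *	int fd = io_uring_setup(64, &p);
 */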
6452
6c271ce2
JA
6453static int io_sq_offload_start(struct io_ring_ctx *ctx,
6454 struct io_uring_params *p)
2b188cc1
JA
6455{
6456 int ret;
6457
6c271ce2 6458 init_waitqueue_head(&ctx->sqo_wait);
2b188cc1
JA
6459 mmgrab(current->mm);
6460 ctx->sqo_mm = current->mm;
6461
6c271ce2 6462 if (ctx->flags & IORING_SETUP_SQPOLL) {
3ec482d1
JA
6463 ret = -EPERM;
6464 if (!capable(CAP_SYS_ADMIN))
6465 goto err;
6466
917257da
JA
6467 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
6468 if (!ctx->sq_thread_idle)
6469 ctx->sq_thread_idle = HZ;
6470
6c271ce2 6471 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 6472 int cpu = p->sq_thread_cpu;
6c271ce2 6473
917257da 6474 ret = -EINVAL;
44a9bd18
JA
6475 if (cpu >= nr_cpu_ids)
6476 goto err;
7889f44d 6477 if (!cpu_online(cpu))
917257da
JA
6478 goto err;
6479
6c271ce2
JA
6480 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
6481 ctx, cpu,
6482 "io_uring-sq");
6483 } else {
6484 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
6485 "io_uring-sq");
6486 }
6487 if (IS_ERR(ctx->sqo_thread)) {
6488 ret = PTR_ERR(ctx->sqo_thread);
6489 ctx->sqo_thread = NULL;
6490 goto err;
6491 }
6492 wake_up_process(ctx->sqo_thread);
6493 } else if (p->flags & IORING_SETUP_SQ_AFF) {
6494 /* Can't have SQ_AFF without SQPOLL */
6495 ret = -EINVAL;
6496 goto err;
6497 }
6498
24369c2e
PB
6499 ret = io_init_wq_offload(ctx, p);
6500 if (ret)
2b188cc1 6501 goto err;
2b188cc1
JA
6502
6503 return 0;
6504err:
54a91f3b 6505 io_finish_async(ctx);
2b188cc1
JA
6506 mmdrop(ctx->sqo_mm);
6507 ctx->sqo_mm = NULL;
6508 return ret;
6509}
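
/*
 * Userspace sketch (illustrative only): requesting a kernel submission
 * thread pinned to CPU 0 that idles for 2 seconds before sleeping:
 *
 *	struct io_uring_params p = {
 *		.flags		= IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF,
 *		.sq_thread_cpu	= 0,
 *		.sq_thread_idle	= 2000,
 *	};
 *	int fd = io_uring_setup(128, &p);
 *
 * Note that IORING_SETUP_SQPOLL requires CAP_SYS_ADMIN, as checked above.
 */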
6510
6511static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
6512{
6513 atomic_long_sub(nr_pages, &user->locked_vm);
6514}
6515
6516static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
6517{
6518 unsigned long page_limit, cur_pages, new_pages;
6519
6520 /* Don't allow more pages than we can safely lock */
6521 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
6522
6523 do {
6524 cur_pages = atomic_long_read(&user->locked_vm);
6525 new_pages = cur_pages + nr_pages;
6526 if (new_pages > page_limit)
6527 return -ENOMEM;
6528 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
6529 new_pages) != cur_pages);
6530
6531 return 0;
6532}
6533
6534static void io_mem_free(void *ptr)
6535{
52e04ef4
MR
6536 struct page *page;
6537
6538 if (!ptr)
6539 return;
2b188cc1 6540
52e04ef4 6541 page = virt_to_head_page(ptr);
2b188cc1
JA
6542 if (put_page_testzero(page))
6543 free_compound_page(page);
6544}
6545
6546static void *io_mem_alloc(size_t size)
6547{
6548 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
6549 __GFP_NORETRY;
6550
6551 return (void *) __get_free_pages(gfp_flags, get_order(size));
6552}
6553
75b28aff
HV
6554static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
6555 size_t *sq_offset)
6556{
6557 struct io_rings *rings;
6558 size_t off, sq_array_size;
6559
6560 off = struct_size(rings, cqes, cq_entries);
6561 if (off == SIZE_MAX)
6562 return SIZE_MAX;
6563
6564#ifdef CONFIG_SMP
6565 off = ALIGN(off, SMP_CACHE_BYTES);
6566 if (off == 0)
6567 return SIZE_MAX;
6568#endif
6569
6570 sq_array_size = array_size(sizeof(u32), sq_entries);
6571 if (sq_array_size == SIZE_MAX)
6572 return SIZE_MAX;
6573
6574 if (check_add_overflow(off, sq_array_size, &off))
6575 return SIZE_MAX;
6576
6577 if (sq_offset)
6578 *sq_offset = off;
6579
6580 return off;
6581}
6582
2b188cc1
JA
6583static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
6584{
75b28aff 6585 size_t pages;
2b188cc1 6586
75b28aff
HV
6587 pages = (size_t)1 << get_order(
6588 rings_size(sq_entries, cq_entries, NULL));
6589 pages += (size_t)1 << get_order(
6590 array_size(sizeof(struct io_uring_sqe), sq_entries));
2b188cc1 6591
75b28aff 6592 return pages;
2b188cc1
JA
6593}
6594
edafccee
JA
6595static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
6596{
6597 int i, j;
6598
6599 if (!ctx->user_bufs)
6600 return -ENXIO;
6601
6602 for (i = 0; i < ctx->nr_user_bufs; i++) {
6603 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
6604
6605 for (j = 0; j < imu->nr_bvecs; j++)
f1f6a7dd 6606 unpin_user_page(imu->bvec[j].bv_page);
edafccee
JA
6607
6608 if (ctx->account_mem)
6609 io_unaccount_mem(ctx->user, imu->nr_bvecs);
d4ef6475 6610 kvfree(imu->bvec);
edafccee
JA
6611 imu->nr_bvecs = 0;
6612 }
6613
6614 kfree(ctx->user_bufs);
6615 ctx->user_bufs = NULL;
6616 ctx->nr_user_bufs = 0;
6617 return 0;
6618}
6619
6620static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
6621 void __user *arg, unsigned index)
6622{
6623 struct iovec __user *src;
6624
6625#ifdef CONFIG_COMPAT
6626 if (ctx->compat) {
6627 struct compat_iovec __user *ciovs;
6628 struct compat_iovec ciov;
6629
6630 ciovs = (struct compat_iovec __user *) arg;
6631 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
6632 return -EFAULT;
6633
d55e5f5b 6634 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
6635 dst->iov_len = ciov.iov_len;
6636 return 0;
6637 }
6638#endif
6639 src = (struct iovec __user *) arg;
6640 if (copy_from_user(dst, &src[index], sizeof(*dst)))
6641 return -EFAULT;
6642 return 0;
6643}
6644
6645static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
6646 unsigned nr_args)
6647{
6648 struct vm_area_struct **vmas = NULL;
6649 struct page **pages = NULL;
6650 int i, j, got_pages = 0;
6651 int ret = -EINVAL;
6652
6653 if (ctx->user_bufs)
6654 return -EBUSY;
6655 if (!nr_args || nr_args > UIO_MAXIOV)
6656 return -EINVAL;
6657
6658 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
6659 GFP_KERNEL);
6660 if (!ctx->user_bufs)
6661 return -ENOMEM;
6662
6663 for (i = 0; i < nr_args; i++) {
6664 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
6665 unsigned long off, start, end, ubuf;
6666 int pret, nr_pages;
6667 struct iovec iov;
6668 size_t size;
6669
6670 ret = io_copy_iov(ctx, &iov, arg, i);
6671 if (ret)
a278682d 6672 goto err;
edafccee
JA
6673
6674 /*
6675 * Don't impose further limits on the size and buffer
6676 * constraints here, we'll -EINVAL later when IO is
6677 * submitted if they are wrong.
6678 */
6679 ret = -EFAULT;
6680 if (!iov.iov_base || !iov.iov_len)
6681 goto err;
6682
6683 /* arbitrary limit, but we need something */
6684 if (iov.iov_len > SZ_1G)
6685 goto err;
6686
6687 ubuf = (unsigned long) iov.iov_base;
6688 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
6689 start = ubuf >> PAGE_SHIFT;
6690 nr_pages = end - start;
6691
6692 if (ctx->account_mem) {
6693 ret = io_account_mem(ctx->user, nr_pages);
6694 if (ret)
6695 goto err;
6696 }
6697
6698 ret = 0;
6699 if (!pages || nr_pages > got_pages) {
6700 kfree(vmas);
6701 kfree(pages);
d4ef6475 6702 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
edafccee 6703 GFP_KERNEL);
d4ef6475 6704 vmas = kvmalloc_array(nr_pages,
edafccee
JA
6705 sizeof(struct vm_area_struct *),
6706 GFP_KERNEL);
6707 if (!pages || !vmas) {
6708 ret = -ENOMEM;
6709 if (ctx->account_mem)
6710 io_unaccount_mem(ctx->user, nr_pages);
6711 goto err;
6712 }
6713 got_pages = nr_pages;
6714 }
6715
d4ef6475 6716 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
edafccee
JA
6717 GFP_KERNEL);
6718 ret = -ENOMEM;
6719 if (!imu->bvec) {
6720 if (ctx->account_mem)
6721 io_unaccount_mem(ctx->user, nr_pages);
6722 goto err;
6723 }
6724
6725 ret = 0;
6726 down_read(&current->mm->mmap_sem);
2113b05d 6727 pret = pin_user_pages(ubuf, nr_pages,
932f4a63
IW
6728 FOLL_WRITE | FOLL_LONGTERM,
6729 pages, vmas);
edafccee
JA
6730 if (pret == nr_pages) {
6731 /* don't support file backed memory */
6732 for (j = 0; j < nr_pages; j++) {
6733 struct vm_area_struct *vma = vmas[j];
6734
6735 if (vma->vm_file &&
6736 !is_file_hugepages(vma->vm_file)) {
6737 ret = -EOPNOTSUPP;
6738 break;
6739 }
6740 }
6741 } else {
6742 ret = pret < 0 ? pret : -EFAULT;
6743 }
6744 up_read(&current->mm->mmap_sem);
6745 if (ret) {
6746 /*
6747 * if we did partial map, or found file backed vmas,
6748 * release any pages we did get
6749 */
27c4d3a3 6750 if (pret > 0)
f1f6a7dd 6751 unpin_user_pages(pages, pret);
edafccee
JA
6752 if (ctx->account_mem)
6753 io_unaccount_mem(ctx->user, nr_pages);
d4ef6475 6754 kvfree(imu->bvec);
edafccee
JA
6755 goto err;
6756 }
6757
6758 off = ubuf & ~PAGE_MASK;
6759 size = iov.iov_len;
6760 for (j = 0; j < nr_pages; j++) {
6761 size_t vec_len;
6762
6763 vec_len = min_t(size_t, size, PAGE_SIZE - off);
6764 imu->bvec[j].bv_page = pages[j];
6765 imu->bvec[j].bv_len = vec_len;
6766 imu->bvec[j].bv_offset = off;
6767 off = 0;
6768 size -= vec_len;
6769 }
6770 /* store original address for later verification */
6771 imu->ubuf = ubuf;
6772 imu->len = iov.iov_len;
6773 imu->nr_bvecs = nr_pages;
6774
6775 ctx->nr_user_bufs++;
6776 }
d4ef6475
MR
6777 kvfree(pages);
6778 kvfree(vmas);
edafccee
JA
6779 return 0;
6780err:
d4ef6475
MR
6781 kvfree(pages);
6782 kvfree(vmas);
edafccee
JA
6783 io_sqe_buffer_unregister(ctx);
6784 return ret;
6785}
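
/*
 * Userspace sketch (illustrative only; 'ring_fd', 'buf' and 'buf_len' are
 * assumed): fixed buffers are registered as an array of iovecs, each at
 * most 1GB as enforced above:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = buf_len };
 *	io_uring_register(ring_fd, IORING_REGISTER_BUFFERS, &iov, 1);
 *
 * The pinned buffers are then used with IORING_OP_READ_FIXED and
 * IORING_OP_WRITE_FIXED, with sqe->buf_index selecting the buffer.
 */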
6786
9b402849
JA
6787static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
6788{
6789 __s32 __user *fds = arg;
6790 int fd;
6791
6792 if (ctx->cq_ev_fd)
6793 return -EBUSY;
6794
6795 if (copy_from_user(&fd, fds, sizeof(*fds)))
6796 return -EFAULT;
6797
6798 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
6799 if (IS_ERR(ctx->cq_ev_fd)) {
6800 int ret = PTR_ERR(ctx->cq_ev_fd);
6801 ctx->cq_ev_fd = NULL;
6802 return ret;
6803 }
6804
6805 return 0;
6806}
6807
6808static int io_eventfd_unregister(struct io_ring_ctx *ctx)
6809{
6810 if (ctx->cq_ev_fd) {
6811 eventfd_ctx_put(ctx->cq_ev_fd);
6812 ctx->cq_ev_fd = NULL;
6813 return 0;
6814 }
6815
6816 return -ENXIO;
6817}
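
/*
 * Userspace sketch (illustrative only; 'ring_fd' is assumed): completions
 * can be signalled through an eventfd registered with the ring:
 *
 *	int efd = eventfd(0, 0);
 *	io_uring_register(ring_fd, IORING_REGISTER_EVENTFD, &efd, 1);
 *
 * IORING_UNREGISTER_EVENTFD detaches it again.
 */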
6818
5a2e745d
JA
6819static int __io_destroy_buffers(int id, void *p, void *data)
6820{
6821 struct io_ring_ctx *ctx = data;
6822 struct io_buffer *buf = p;
6823
6824 /* the head kbuf is the list itself */
6825 while (!list_empty(&buf->list)) {
6826 struct io_buffer *nxt;
6827
6828 nxt = list_first_entry(&buf->list, struct io_buffer, list);
6829 list_del(&nxt->list);
6830 kfree(nxt);
6831 }
6832 kfree(buf);
6833 idr_remove(&ctx->io_buffer_idr, id);
6834 return 0;
6835}
6836
6837static void io_destroy_buffers(struct io_ring_ctx *ctx)
6838{
6839 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
6840 idr_destroy(&ctx->io_buffer_idr);
6841}
6842
2b188cc1
JA
6843static void io_ring_ctx_free(struct io_ring_ctx *ctx)
6844{
6b06314c 6845 io_finish_async(ctx);
2b188cc1
JA
6846 if (ctx->sqo_mm)
6847 mmdrop(ctx->sqo_mm);
def596e9
JA
6848
6849 io_iopoll_reap_events(ctx);
edafccee 6850 io_sqe_buffer_unregister(ctx);
6b06314c 6851 io_sqe_files_unregister(ctx);
9b402849 6852 io_eventfd_unregister(ctx);
5a2e745d 6853 io_destroy_buffers(ctx);
41726c9a 6854 idr_destroy(&ctx->personality_idr);
def596e9 6855
2b188cc1 6856#if defined(CONFIG_UNIX)
355e8d26
EB
6857 if (ctx->ring_sock) {
6858 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 6859 sock_release(ctx->ring_sock);
355e8d26 6860 }
2b188cc1
JA
6861#endif
6862
75b28aff 6863 io_mem_free(ctx->rings);
2b188cc1 6864 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
6865
6866 percpu_ref_exit(&ctx->refs);
6867 if (ctx->account_mem)
6868 io_unaccount_mem(ctx->user,
6869 ring_pages(ctx->sq_entries, ctx->cq_entries));
6870 free_uid(ctx->user);
181e448d 6871 put_cred(ctx->creds);
206aefde 6872 kfree(ctx->completions);
78076bb6 6873 kfree(ctx->cancel_hash);
0ddf92e8 6874 kmem_cache_free(req_cachep, ctx->fallback_req);
2b188cc1
JA
6875 kfree(ctx);
6876}
6877
6878static __poll_t io_uring_poll(struct file *file, poll_table *wait)
6879{
6880 struct io_ring_ctx *ctx = file->private_data;
6881 __poll_t mask = 0;
6882
6883 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
6884 /*
6885 * synchronizes with barrier from wq_has_sleeper call in
6886 * io_commit_cqring
6887 */
2b188cc1 6888 smp_rmb();
75b28aff
HV
6889 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
6890 ctx->rings->sq_ring_entries)
2b188cc1 6891 mask |= EPOLLOUT | EPOLLWRNORM;
63e5d81f 6892 if (io_cqring_events(ctx, false))
2b188cc1
JA
6893 mask |= EPOLLIN | EPOLLRDNORM;
6894
6895 return mask;
6896}
6897
6898static int io_uring_fasync(int fd, struct file *file, int on)
6899{
6900 struct io_ring_ctx *ctx = file->private_data;
6901
6902 return fasync_helper(fd, file, on, &ctx->cq_fasync);
6903}
6904
071698e1
JA
6905static int io_remove_personalities(int id, void *p, void *data)
6906{
6907 struct io_ring_ctx *ctx = data;
6908 const struct cred *cred;
6909
6910 cred = idr_remove(&ctx->personality_idr, id);
6911 if (cred)
6912 put_cred(cred);
6913 return 0;
6914}
6915
2b188cc1
JA
6916static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
6917{
6918 mutex_lock(&ctx->uring_lock);
6919 percpu_ref_kill(&ctx->refs);
6920 mutex_unlock(&ctx->uring_lock);
6921
df069d80
JA
6922 /*
6923 * Wait for sq thread to idle, if we have one. It won't spin on new
6924 * work after we've killed the ctx ref above. This is important to do
6925 * before we cancel existing commands, as the thread could otherwise
 6926 * be queueing new work after that point. If that is work we need
 6927 * to cancel, it could cause shutdown to hang.
6928 */
6929 while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
6930 cpu_relax();
6931
5262f567 6932 io_kill_timeouts(ctx);
221c5eb2 6933 io_poll_remove_all(ctx);
561fb04a
JA
6934
6935 if (ctx->io_wq)
6936 io_wq_cancel_all(ctx->io_wq);
6937
def596e9 6938 io_iopoll_reap_events(ctx);
15dff286
JA
6939 /* if we failed setting up the ctx, we might not have any rings */
6940 if (ctx->rings)
6941 io_cqring_overflow_flush(ctx, true);
071698e1 6942 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
206aefde 6943 wait_for_completion(&ctx->completions[0]);
2b188cc1
JA
6944 io_ring_ctx_free(ctx);
6945}
6946
6947static int io_uring_release(struct inode *inode, struct file *file)
6948{
6949 struct io_ring_ctx *ctx = file->private_data;
6950
6951 file->private_data = NULL;
6952 io_ring_ctx_wait_and_kill(ctx);
6953 return 0;
6954}
6955
fcb323cc
JA
6956static void io_uring_cancel_files(struct io_ring_ctx *ctx,
6957 struct files_struct *files)
6958{
6959 struct io_kiocb *req;
6960 DEFINE_WAIT(wait);
6961
6962 while (!list_empty_careful(&ctx->inflight_list)) {
768134d4 6963 struct io_kiocb *cancel_req = NULL;
fcb323cc
JA
6964
6965 spin_lock_irq(&ctx->inflight_lock);
6966 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
768134d4
JA
6967 if (req->work.files != files)
6968 continue;
6969 /* req is being completed, ignore */
6970 if (!refcount_inc_not_zero(&req->refs))
6971 continue;
6972 cancel_req = req;
6973 break;
fcb323cc 6974 }
768134d4 6975 if (cancel_req)
fcb323cc 6976 prepare_to_wait(&ctx->inflight_wait, &wait,
768134d4 6977 TASK_UNINTERRUPTIBLE);
fcb323cc
JA
6978 spin_unlock_irq(&ctx->inflight_lock);
6979
768134d4
JA
6980 /* We need to keep going until we don't find a matching req */
6981 if (!cancel_req)
fcb323cc 6982 break;
2f6d9b9d 6983
2ca10259
JA
6984 if (cancel_req->flags & REQ_F_OVERFLOW) {
6985 spin_lock_irq(&ctx->completion_lock);
6986 list_del(&cancel_req->list);
6987 cancel_req->flags &= ~REQ_F_OVERFLOW;
6988 if (list_empty(&ctx->cq_overflow_list)) {
6989 clear_bit(0, &ctx->sq_check_overflow);
6990 clear_bit(0, &ctx->cq_check_overflow);
6991 }
6992 spin_unlock_irq(&ctx->completion_lock);
6993
6994 WRITE_ONCE(ctx->rings->cq_overflow,
6995 atomic_inc_return(&ctx->cached_cq_overflow));
6996
6997 /*
6998 * Put inflight ref and overflow ref. If that's
6999 * all we had, then we're done with this request.
7000 */
7001 if (refcount_sub_and_test(2, &cancel_req->refs)) {
7002 io_put_req(cancel_req);
7003 continue;
7004 }
7005 }
7006
2f6d9b9d
BL
7007 io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
7008 io_put_req(cancel_req);
fcb323cc
JA
7009 schedule();
7010 }
768134d4 7011 finish_wait(&ctx->inflight_wait, &wait);
fcb323cc
JA
7012}
7013
7014static int io_uring_flush(struct file *file, void *data)
7015{
7016 struct io_ring_ctx *ctx = file->private_data;
7017
7018 io_uring_cancel_files(ctx, data);
6ab23144
JA
7019
7020 /*
7021 * If the task is going away, cancel work it may have pending
7022 */
7023 if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
7024 io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));
7025
fcb323cc
JA
7026 return 0;
7027}
7028
6c5c240e
RP
7029static void *io_uring_validate_mmap_request(struct file *file,
7030 loff_t pgoff, size_t sz)
2b188cc1 7031{
2b188cc1 7032 struct io_ring_ctx *ctx = file->private_data;
6c5c240e 7033 loff_t offset = pgoff << PAGE_SHIFT;
2b188cc1
JA
7034 struct page *page;
7035 void *ptr;
7036
7037 switch (offset) {
7038 case IORING_OFF_SQ_RING:
75b28aff
HV
7039 case IORING_OFF_CQ_RING:
7040 ptr = ctx->rings;
2b188cc1
JA
7041 break;
7042 case IORING_OFF_SQES:
7043 ptr = ctx->sq_sqes;
7044 break;
2b188cc1 7045 default:
6c5c240e 7046 return ERR_PTR(-EINVAL);
2b188cc1
JA
7047 }
7048
7049 page = virt_to_head_page(ptr);
a50b854e 7050 if (sz > page_size(page))
6c5c240e
RP
7051 return ERR_PTR(-EINVAL);
7052
7053 return ptr;
7054}
7055
7056#ifdef CONFIG_MMU
7057
7058static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
7059{
7060 size_t sz = vma->vm_end - vma->vm_start;
7061 unsigned long pfn;
7062 void *ptr;
7063
7064 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
7065 if (IS_ERR(ptr))
7066 return PTR_ERR(ptr);
2b188cc1
JA
7067
7068 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
7069 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
7070}
7071
6c5c240e
RP
7072#else /* !CONFIG_MMU */
7073
7074static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
7075{
7076 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
7077}
7078
7079static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
7080{
7081 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
7082}
7083
7084static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
7085 unsigned long addr, unsigned long len,
7086 unsigned long pgoff, unsigned long flags)
7087{
7088 void *ptr;
7089
7090 ptr = io_uring_validate_mmap_request(file, pgoff, len);
7091 if (IS_ERR(ptr))
7092 return PTR_ERR(ptr);
7093
7094 return (unsigned long) ptr;
7095}
7096
7097#endif /* !CONFIG_MMU */
7098
2b188cc1
JA
7099SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
7100 u32, min_complete, u32, flags, const sigset_t __user *, sig,
7101 size_t, sigsz)
7102{
7103 struct io_ring_ctx *ctx;
7104 long ret = -EBADF;
7105 int submitted = 0;
7106 struct fd f;
7107
b41e9852
JA
7108 if (current->task_works)
7109 task_work_run();
7110
6c271ce2 7111 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
2b188cc1
JA
7112 return -EINVAL;
7113
7114 f = fdget(fd);
7115 if (!f.file)
7116 return -EBADF;
7117
7118 ret = -EOPNOTSUPP;
7119 if (f.file->f_op != &io_uring_fops)
7120 goto out_fput;
7121
7122 ret = -ENXIO;
7123 ctx = f.file->private_data;
7124 if (!percpu_ref_tryget(&ctx->refs))
7125 goto out_fput;
7126
6c271ce2
JA
7127 /*
7128 * For SQ polling, the thread will do all submissions and completions.
7129 * Just return the requested submit count, and wake the thread if
7130 * we were asked to.
7131 */
b2a9eada 7132 ret = 0;
6c271ce2 7133 if (ctx->flags & IORING_SETUP_SQPOLL) {
c1edbf5f
JA
7134 if (!list_empty_careful(&ctx->cq_overflow_list))
7135 io_cqring_overflow_flush(ctx, false);
6c271ce2
JA
7136 if (flags & IORING_ENTER_SQ_WAKEUP)
7137 wake_up(&ctx->sqo_wait);
7138 submitted = to_submit;
b2a9eada 7139 } else if (to_submit) {
ae9428ca 7140 struct mm_struct *cur_mm;
2b188cc1
JA
7141
7142 mutex_lock(&ctx->uring_lock);
ae9428ca
PB
7143 /* already have mm, so io_submit_sqes() won't try to grab it */
7144 cur_mm = ctx->sqo_mm;
7145 submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
7146 &cur_mm, false);
2b188cc1 7147 mutex_unlock(&ctx->uring_lock);
7c504e65
PB
7148
7149 if (submitted != to_submit)
7150 goto out;
2b188cc1
JA
7151 }
7152 if (flags & IORING_ENTER_GETEVENTS) {
def596e9
JA
7153 unsigned nr_events = 0;
7154
2b188cc1
JA
7155 min_complete = min(min_complete, ctx->cq_entries);
7156
def596e9 7157 if (ctx->flags & IORING_SETUP_IOPOLL) {
def596e9 7158 ret = io_iopoll_check(ctx, &nr_events, min_complete);
def596e9
JA
7159 } else {
7160 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
7161 }
2b188cc1
JA
7162 }
7163
7c504e65 7164out:
6805b32e 7165 percpu_ref_put(&ctx->refs);
2b188cc1
JA
7166out_fput:
7167 fdput(f);
7168 return submitted ? submitted : ret;
7169}
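
/*
 * Userspace sketch (illustrative only; 'ring_fd' and 'to_submit' are
 * assumed): submit queued sqes and wait for at least one completion in a
 * single call:
 *
 *	io_uring_enter(ring_fd, to_submit, 1, IORING_ENTER_GETEVENTS,
 *		       NULL, 0);
 *
 * With IORING_SETUP_SQPOLL, to_submit is returned as-is and the call is
 * mainly needed to wake the poll thread (IORING_ENTER_SQ_WAKEUP) or to
 * wait for completions.
 */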
7170
bebdb65e 7171#ifdef CONFIG_PROC_FS
87ce955b
JA
7172static int io_uring_show_cred(int id, void *p, void *data)
7173{
7174 const struct cred *cred = p;
7175 struct seq_file *m = data;
7176 struct user_namespace *uns = seq_user_ns(m);
7177 struct group_info *gi;
7178 kernel_cap_t cap;
7179 unsigned __capi;
7180 int g;
7181
7182 seq_printf(m, "%5d\n", id);
7183 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
7184 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
7185 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
7186 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
7187 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
7188 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
7189 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
7190 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
7191 seq_puts(m, "\n\tGroups:\t");
7192 gi = cred->group_info;
7193 for (g = 0; g < gi->ngroups; g++) {
7194 seq_put_decimal_ull(m, g ? " " : "",
7195 from_kgid_munged(uns, gi->gid[g]));
7196 }
7197 seq_puts(m, "\n\tCapEff:\t");
7198 cap = cred->cap_effective;
7199 CAP_FOR_EACH_U32(__capi)
7200 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
7201 seq_putc(m, '\n');
7202 return 0;
7203}
7204
7205static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
7206{
7207 int i;
7208
7209 mutex_lock(&ctx->uring_lock);
7210 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
7211 for (i = 0; i < ctx->nr_user_files; i++) {
7212 struct fixed_file_table *table;
7213 struct file *f;
7214
7215 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7216 f = table->files[i & IORING_FILE_TABLE_MASK];
7217 if (f)
7218 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
7219 else
7220 seq_printf(m, "%5u: <none>\n", i);
7221 }
7222 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
7223 for (i = 0; i < ctx->nr_user_bufs; i++) {
7224 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
7225
7226 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
7227 (unsigned int) buf->len);
7228 }
7229 if (!idr_is_empty(&ctx->personality_idr)) {
7230 seq_printf(m, "Personalities:\n");
7231 idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
7232 }
d7718a9d
JA
7233 seq_printf(m, "PollList:\n");
7234 spin_lock_irq(&ctx->completion_lock);
7235 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
7236 struct hlist_head *list = &ctx->cancel_hash[i];
7237 struct io_kiocb *req;
7238
7239 hlist_for_each_entry(req, list, hash_node)
7240 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
7241 req->task->task_works != NULL);
7242 }
7243 spin_unlock_irq(&ctx->completion_lock);
87ce955b
JA
7244 mutex_unlock(&ctx->uring_lock);
7245}
7246
7247static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
7248{
7249 struct io_ring_ctx *ctx = f->private_data;
7250
7251 if (percpu_ref_tryget(&ctx->refs)) {
7252 __io_uring_show_fdinfo(ctx, m);
7253 percpu_ref_put(&ctx->refs);
7254 }
7255}
bebdb65e 7256#endif
87ce955b 7257
2b188cc1
JA
7258static const struct file_operations io_uring_fops = {
7259 .release = io_uring_release,
fcb323cc 7260 .flush = io_uring_flush,
2b188cc1 7261 .mmap = io_uring_mmap,
6c5c240e
RP
7262#ifndef CONFIG_MMU
7263 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
7264 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
7265#endif
2b188cc1
JA
7266 .poll = io_uring_poll,
7267 .fasync = io_uring_fasync,
bebdb65e 7268#ifdef CONFIG_PROC_FS
87ce955b 7269 .show_fdinfo = io_uring_show_fdinfo,
bebdb65e 7270#endif
2b188cc1
JA
7271};
7272
7273static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
7274 struct io_uring_params *p)
7275{
75b28aff
HV
7276 struct io_rings *rings;
7277 size_t size, sq_array_offset;
2b188cc1 7278
75b28aff
HV
7279 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
7280 if (size == SIZE_MAX)
7281 return -EOVERFLOW;
7282
7283 rings = io_mem_alloc(size);
7284 if (!rings)
2b188cc1
JA
7285 return -ENOMEM;
7286
75b28aff
HV
7287 ctx->rings = rings;
7288 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
7289 rings->sq_ring_mask = p->sq_entries - 1;
7290 rings->cq_ring_mask = p->cq_entries - 1;
7291 rings->sq_ring_entries = p->sq_entries;
7292 rings->cq_ring_entries = p->cq_entries;
7293 ctx->sq_mask = rings->sq_ring_mask;
7294 ctx->cq_mask = rings->cq_ring_mask;
7295 ctx->sq_entries = rings->sq_ring_entries;
7296 ctx->cq_entries = rings->cq_ring_entries;
2b188cc1
JA
7297
7298 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
eb065d30
JA
7299 if (size == SIZE_MAX) {
7300 io_mem_free(ctx->rings);
7301 ctx->rings = NULL;
2b188cc1 7302 return -EOVERFLOW;
eb065d30 7303 }
2b188cc1
JA
7304
7305 ctx->sq_sqes = io_mem_alloc(size);
eb065d30
JA
7306 if (!ctx->sq_sqes) {
7307 io_mem_free(ctx->rings);
7308 ctx->rings = NULL;
2b188cc1 7309 return -ENOMEM;
eb065d30 7310 }
2b188cc1 7311
2b188cc1
JA
7312 return 0;
7313}
7314
7315/*
7316 * Allocate an anonymous fd, this is what constitutes the application
7317 * visible backing of an io_uring instance. The application mmaps this
7318 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
7319 * we have to tie this fd to a socket for file garbage collection purposes.
7320 */
7321static int io_uring_get_fd(struct io_ring_ctx *ctx)
7322{
7323 struct file *file;
7324 int ret;
7325
7326#if defined(CONFIG_UNIX)
7327 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
7328 &ctx->ring_sock);
7329 if (ret)
7330 return ret;
7331#endif
7332
7333 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
7334 if (ret < 0)
7335 goto err;
7336
7337 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
7338 O_RDWR | O_CLOEXEC);
7339 if (IS_ERR(file)) {
7340 put_unused_fd(ret);
7341 ret = PTR_ERR(file);
7342 goto err;
7343 }
7344
7345#if defined(CONFIG_UNIX)
7346 ctx->ring_sock->file = file;
7347#endif
7348 fd_install(ret, file);
7349 return ret;
7350err:
7351#if defined(CONFIG_UNIX)
7352 sock_release(ctx->ring_sock);
7353 ctx->ring_sock = NULL;
7354#endif
7355 return ret;
7356}
7357
7358static int io_uring_create(unsigned entries, struct io_uring_params *p)
7359{
7360 struct user_struct *user = NULL;
7361 struct io_ring_ctx *ctx;
7362 bool account_mem;
7363 int ret;
7364
8110c1a6 7365 if (!entries)
2b188cc1 7366 return -EINVAL;
8110c1a6
JA
7367 if (entries > IORING_MAX_ENTRIES) {
7368 if (!(p->flags & IORING_SETUP_CLAMP))
7369 return -EINVAL;
7370 entries = IORING_MAX_ENTRIES;
7371 }
2b188cc1
JA
7372
7373 /*
7374 * Use twice as many entries for the CQ ring. It's possible for the
7375 * application to drive a higher depth than the size of the SQ ring,
7376 * since the sqes are only used at submission time. This allows for
33a107f0
JA
7377 * some flexibility in overcommitting a bit. If the application has
7378 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
7379 * of CQ ring entries manually.
2b188cc1
JA
7380 */
7381 p->sq_entries = roundup_pow_of_two(entries);
33a107f0
JA
7382 if (p->flags & IORING_SETUP_CQSIZE) {
7383 /*
7384 * If IORING_SETUP_CQSIZE is set, we do the same roundup
7385 * to a power-of-two, if it isn't already. We do NOT impose
7386 * any cq vs sq ring sizing.
7387 */
8110c1a6 7388 if (p->cq_entries < p->sq_entries)
33a107f0 7389 return -EINVAL;
8110c1a6
JA
7390 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
7391 if (!(p->flags & IORING_SETUP_CLAMP))
7392 return -EINVAL;
7393 p->cq_entries = IORING_MAX_CQ_ENTRIES;
7394 }
33a107f0
JA
7395 p->cq_entries = roundup_pow_of_two(p->cq_entries);
7396 } else {
7397 p->cq_entries = 2 * p->sq_entries;
7398 }
2b188cc1
JA
7399
7400 user = get_uid(current_user());
7401 account_mem = !capable(CAP_IPC_LOCK);
7402
7403 if (account_mem) {
7404 ret = io_account_mem(user,
7405 ring_pages(p->sq_entries, p->cq_entries));
7406 if (ret) {
7407 free_uid(user);
7408 return ret;
7409 }
7410 }
7411
7412 ctx = io_ring_ctx_alloc(p);
7413 if (!ctx) {
7414 if (account_mem)
7415 io_unaccount_mem(user, ring_pages(p->sq_entries,
7416 p->cq_entries));
7417 free_uid(user);
7418 return -ENOMEM;
7419 }
7420 ctx->compat = in_compat_syscall();
7421 ctx->account_mem = account_mem;
7422 ctx->user = user;
0b8c0ec7 7423 ctx->creds = get_current_cred();
2b188cc1
JA
7424
	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_start(ctx, p);
	if (ret)
		goto err;

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup.
	 */
	ret = io_uring_get_fd(ctx);
	if (ret < 0)
		goto err;

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL;
	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}
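
/*
 * For reference, a minimal userspace sketch of consuming the offsets
 * published above (illustrative only: raw syscalls, no error handling,
 * ring size and variable names are arbitrary; the constants and structs
 * come from <linux/io_uring.h>):
 *
 *	struct io_uring_params p = { 0 };
 *	int fd = syscall(__NR_io_uring_setup, 128, &p);
 *
 *	// With IORING_FEAT_SINGLE_MMAP, one mapping covers both rings
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *	char *ring = mmap(NULL, sq_sz > cq_sz ? sq_sz : cq_sz,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			  fd, IORING_OFF_SQ_RING);
 *	unsigned *sq_tail = (unsigned *)(ring + p.sq_off.tail);
 *	struct io_uring_cqe *cqes = (struct io_uring_cqe *)(ring + p.cq_off.cqes);
 *
 *	// The sqe array is a separate mapping
 *	struct io_uring_sqe *sqes = mmap(NULL,
 *			  p.sq_entries * sizeof(struct io_uring_sqe),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			  fd, IORING_OFF_SQES);
 */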

/*
 * Sets up an io_uring context and returns the fd. The application asks for
 * a ring size; we return the actual sq/cq ring sizes (among other things)
 * in the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	long ret;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
		return -EINVAL;

	ret = io_uring_create(entries, &p);
	if (ret < 0)
		return ret;

	if (copy_to_user(params, &p, sizeof(p)))
		return -EFAULT;

	return ret;
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}

static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_op_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}
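
/*
 * For reference, a minimal userspace sketch of probing opcode support via
 * IORING_REGISTER_PROBE (illustrative only, no error handling; ring_fd is
 * an assumed, previously set up io_uring fd). The probe struct must be
 * zeroed before the call, matching the memchr_inv() check above:
 *
 *	struct io_uring_probe *probe = calloc(1, sizeof(*probe) +
 *				256 * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, 256);
 *	if (IORING_OP_READV < probe->ops_len &&
 *	    (probe->ops[IORING_OP_READV].flags & IO_URING_OP_SUPPORTED))
 *		// IORING_OP_READV is supported on this kernel
 */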

static int io_register_personality(struct io_ring_ctx *ctx)
{
	const struct cred *creds = get_current_cred();
	int id;

	id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
				USHRT_MAX, GFP_KERNEL);
	if (id < 0)
		put_cred(creds);
	return id;
}

static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
	const struct cred *old_creds;

	old_creds = idr_remove(&ctx->personality_idr, id);
	if (old_creds) {
		put_cred(old_creds);
		return 0;
	}

	return -EINVAL;
}
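
/*
 * For reference, a minimal userspace sketch of using personalities
 * (illustrative only; ring_fd is an assumed, previously set up io_uring
 * fd). The id returned here is later placed in sqe->personality so that a
 * single ring can issue requests under different credentials:
 *
 *	// register the caller's current credentials; returns an id > 0
 *	int id = syscall(__NR_io_uring_register, ring_fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *	...
 *	sqe->personality = id;	// this sqe runs with the registered creds
 *	...
 *	// drop it again; the id travels in nr_args, arg must be NULL
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_UNREGISTER_PERSONALITY, NULL, id);
 */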

static bool io_register_op_must_quiesce(int op)
{
	switch (op) {
	case IORING_UNREGISTER_FILES:
	case IORING_REGISTER_FILES_UPDATE:
	case IORING_REGISTER_PROBE:
	case IORING_REGISTER_PERSONALITY:
	case IORING_UNREGISTER_PERSONALITY:
		return false;
	default:
		return true;
	}
}

static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex; if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (io_register_op_must_quiesce(opcode)) {
		percpu_ref_kill(&ctx->refs);

		/*
		 * Drop uring mutex before waiting for references to exit. If
		 * another thread is currently inside io_uring_enter() it might
		 * need to grab the uring_lock to make progress. If we hold it
		 * here across the drain wait, then we can deadlock. It's safe
		 * to drop the mutex here, since no new references will come in
		 * after we've killed the percpu ref.
		 */
		mutex_unlock(&ctx->uring_lock);
		ret = wait_for_completion_interruptible(&ctx->completions[0]);
		mutex_lock(&ctx->uring_lock);
		if (ret) {
			percpu_ref_resurrect(&ctx->refs);
			ret = -EINTR;
			goto out;
		}
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffer_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffer_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_sqe_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
out:
		reinit_completion(&ctx->completions[0]);
	}
	return ret;
}

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
							ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}
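
/*
 * For reference, a minimal userspace sketch of registering fixed buffers
 * through this syscall (illustrative only, no error handling; ring_fd,
 * buf and buf_len are assumed). Registered buffers are then used with
 * IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED via sqe->buf_index:
 *
 *	struct iovec iov = {
 *		.iov_base = buf,
 *		.iov_len  = buf_len,
 *	};
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_BUFFERS,
 *		&iov, 1);
 */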

static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, __u16,  poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
	return 0;
}
__initcall(io_uring_init);