io_uring: fix missing 'return' in comment
[linux-block.git] / fs / io_uring.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
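/*
 * Illustrative sketch only (an addition, not part of the original file):
 * the pairing described above, as it might look from the application side.
 * The pointer names (sq_tail, sq_mask, sq_array, cq_head, cq_tail, cq_mask,
 * cqes) are assumed stand-ins for the application's mmap'ed ring fields,
 * located via struct io_sqring_offsets/io_cqring_offsets.
 *
 *	// publish one SQE: store the array index first, then release the tail
 *	unsigned tail = *sq_tail;
 *	sq_array[tail & *sq_mask] = sqe_index;
 *	smp_store_release(sq_tail, tail + 1);
 *
 *	// reap one CQE: acquire the tail, read the entry, then release head
 *	unsigned head = *cq_head;
 *	if (head != smp_load_acquire(cq_tail)) {
 *		struct io_uring_cqe cqe = cqes[head & *cq_mask];
 *		smp_store_release(cq_head, head + 1);
 *	}
 */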
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
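/*
 * Illustrative sketch only (an addition, not original): given a mapped
 * struct io_rings *r, the fields above are all that is needed to see how
 * many completions are ready to reap and where the next one lives:
 *
 *	unsigned ready = smp_load_acquire(&r->cq.tail) - r->cq.head;
 *	struct io_uring_cqe *next = &r->cqes[r->cq.head & r->cq_ring_mask];
 */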
struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct bio_vec	*bvec;
	unsigned int	nr_bvecs;
};

struct fixed_file_table {
	struct file		**files;
};

struct fixed_file_data {
	struct fixed_file_table	*table;
	struct io_ring_ctx	*ctx;

	struct percpu_ref	refs;
	struct llist_head	put_llist;
	struct work_struct	ref_work;
	struct completion	done;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__s32 len;
	__u16 bid;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		account_mem: 1;
		unsigned int		cq_overflow_flushed: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		atomic_t		cached_cq_overflow;
		unsigned long		sq_check_overflow;

		struct list_head	defer_list;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;

		wait_queue_head_t	inflight_wait;
		struct io_uring_sqe	*sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct io_rings	*rings;

	/* IO offload */
	struct io_wq		*io_wq;
	struct task_struct	*sqo_thread;	/* if using sq thread polling */
	struct mm_struct	*sqo_mm;
	wait_queue_head_t	sqo_wait;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_file_data	*file_data;
	unsigned		nr_user_files;
	int			ring_fd;
	struct file		*ring_file;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	const struct cred	*creds;

	/* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */
	struct completion	*completions;

	/* if all else fails... */
	struct io_kiocb		*fallback_req;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	struct idr		io_buffer_idr;

	struct idr		personality_idr;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		unsigned		cq_mask;
		atomic_t		cq_timeouts;
		unsigned long		cq_check_overflow;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->poll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	poll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_file;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	union {
		struct wait_queue_head	*head;
		u64			addr;
	};
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_close {
	struct file			*file;
	struct file			*put_file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
	u32				seq_offset;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u64				addr;
	int				flags;
	unsigned			count;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct user_msghdr __user *msg;
		void __user		*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	union {
		unsigned		mask;
	};
	struct filename			*filename;
	struct statx __user		*buffer;
	struct open_how			how;
};

struct io_files_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__s32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	struct iovec			*iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	struct iovec			*iov;
	ssize_t				nr_segs;
	ssize_t				size;
};

struct io_async_ctx {
	union {
		struct io_async_rw	rw;
		struct io_async_msghdr	msg;
		struct io_async_connect	connect;
		struct io_timeout_data	timeout;
	};
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	REQ_F_LINK_NEXT_BIT,
	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_IOPOLL_COMPLETED_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_MUST_PUNT_BIT,
	REQ_F_TIMEOUT_NOSEQ_BIT,
	REQ_F_COMP_LOCKED_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_OVERFLOW_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* already grabbed next link */
	REQ_F_LINK_NEXT		= BIT(REQ_F_LINK_NEXT_BIT),
	/* fail rest of links */
	REQ_F_FAIL_LINK		= BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* polled IO has completed */
	REQ_F_IOPOLL_COMPLETED	= BIT(REQ_F_IOPOLL_COMPLETED_BIT),
	/* has linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* timeout request */
	REQ_F_TIMEOUT		= BIT(REQ_F_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* must be punted even for NONBLOCK */
	REQ_F_MUST_PUNT		= BIT(REQ_F_MUST_PUNT_BIT),
	/* no timeout sequence */
	REQ_F_TIMEOUT_NOSEQ	= BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
	/* completion under lock */
	REQ_F_COMP_LOCKED	= BIT(REQ_F_COMP_LOCKED_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* in overflow list */
	REQ_F_OVERFLOW		= BIT(REQ_F_OVERFLOW_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_wq_work	work;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_files_update	files_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
	};

	struct io_async_ctx		*io;
	bool				needs_fixed_file;
	u8				opcode;

	struct io_ring_ctx	*ctx;
	struct list_head	list;
	unsigned int		flags;
	refcount_t		refs;
	union {
		struct task_struct	*task;
		unsigned long		fsize;
	};
	u64			user_data;
	u32			result;
	u32			sequence;

	struct list_head	link_list;

	struct list_head	inflight_entry;

	union {
		/*
		 * Only commands that never go async can use the below fields,
		 * obviously. Right now only IORING_OP_POLL_ADD uses them, and
		 * async armed poll handlers for regular commands. The latter
		 * restore the work, if needed.
		 */
		struct {
			struct callback_head	task_work;
			struct hlist_node	hash_node;
			struct async_poll	*apoll;
			int			cflags;
		};
		struct io_wq_work	work;
	};
};

#define IO_PLUG_THRESHOLD		2
#define IO_IOPOLL_BATCH			8

struct io_submit_state {
	struct blk_plug		plug;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_IOPOLL_BATCH];
	unsigned int		free_reqs;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		has_refs;
	unsigned int		used_refs;
	unsigned int		ios_left;
};

struct io_op_def {
	/* needs req->io allocated for deferral/async */
	unsigned		async_ctx : 1;
	/* needs current->mm setup, does mm access */
	unsigned		needs_mm : 1;
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* needs req->file assigned IFF fd is >= 0 */
	unsigned		fd_non_neg : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* needs file table */
	unsigned		file_table : 1;
	/* needs ->fs */
	unsigned		needs_fs : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_WRITEV] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.needs_fs		= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECVMSG] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.needs_fs		= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_TIMEOUT] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
	},
	[IORING_OP_TIMEOUT_REMOVE] = {},
	[IORING_OP_ACCEPT] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.file_table		= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
	},
	[IORING_OP_CONNECT] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {
		.needs_file		= 1,
		.fd_non_neg		= 1,
		.file_table		= 1,
		.needs_fs		= 1,
	},
	[IORING_OP_CLOSE] = {
		.needs_file		= 1,
		.file_table		= 1,
	},
	[IORING_OP_FILES_UPDATE] = {
		.needs_mm		= 1,
		.file_table		= 1,
	},
	[IORING_OP_STATX] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.fd_non_neg		= 1,
		.needs_fs		= 1,
	},
	[IORING_OP_READ] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_WRITE] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {
		.needs_mm		= 1,
	},
	[IORING_OP_SEND] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
		.needs_file		= 1,
		.fd_non_neg		= 1,
		.file_table		= 1,
		.needs_fs		= 1,
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
		.file_table		= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
};
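/*
 * Illustrative sketch only (an addition, not original): the table is
 * indexed by the fixed opcode values from <uapi/linux/io_uring.h>, so
 * per-opcode behaviour checks reduce to simple bitfield tests, e.g.:
 *
 *	const struct io_op_def *def = &io_op_defs[req->opcode];
 *	if (def->needs_file && !req->file)
 *		return -EBADF;
 */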

static void io_wq_submit_work(struct io_wq_work **workptr);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_files_update *ip,
				 unsigned nr_args);
static int io_grab_files(struct io_kiocb *req);
static void io_ring_file_ref_flush(struct fixed_file_data *data);
static void io_cleanup_req(struct io_kiocb *req);
static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
		       int fd, struct file **out_file, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req,
			   const struct io_uring_sqe *sqe);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->completions[0]);
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
	if (!ctx->fallback_req)
		goto err;

	ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL);
	if (!ctx->completions)
		goto err;

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
					GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->cq_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->completions[0]);
	init_completion(&ctx->completions[1]);
	idr_init(&ctx->io_buffer_idr);
	idr_init(&ctx->personality_idr);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->poll_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	init_waitqueue_head(&ctx->inflight_wait);
	spin_lock_init(&ctx->inflight_lock);
	INIT_LIST_HEAD(&ctx->inflight_list);
	return ctx;
err:
	if (ctx->fallback_req)
		kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx->completions);
	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}

static inline bool __req_need_defer(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
					+ atomic_read(&ctx->cached_cq_overflow);
}

static inline bool req_need_defer(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN))
		return __req_need_defer(req);

	return false;
}

static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
	if (req && !req_need_defer(req)) {
		list_del_init(&req->list);
		return req;
	}

	return NULL;
}

static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
	if (req) {
		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
			return NULL;
		if (!__req_need_defer(req)) {
			list_del_init(&req->list);
			return req;
		}
	}

	return NULL;
}

static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* order cqe stores with ring update */
	smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);

	if (wq_has_sleeper(&ctx->cq_wait)) {
		wake_up_interruptible(&ctx->cq_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}
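/*
 * Illustrative sketch only (an addition, not original): the
 * smp_store_release() above is what the application-side
 * smp_load_acquire() of cq.tail pairs with:
 *
 *	unsigned tail = smp_load_acquire(&rings->cq.tail);	// app side
 *	// all cqe stores made before the release are now visible
 */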

static inline void io_req_work_grab_env(struct io_kiocb *req,
					const struct io_op_def *def)
{
	if (!req->work.mm && def->needs_mm) {
		mmgrab(current->mm);
		req->work.mm = current->mm;
	}
	if (!req->work.creds)
		req->work.creds = get_current_cred();
	if (!req->work.fs && def->needs_fs) {
		spin_lock(&current->fs->lock);
		if (!current->fs->in_exec) {
			req->work.fs = current->fs;
			req->work.fs->users++;
		} else {
			req->work.flags |= IO_WQ_WORK_CANCEL;
		}
		spin_unlock(&current->fs->lock);
	}
	if (!req->work.task_pid)
		req->work.task_pid = task_pid_vnr(current);
}

static inline void io_req_work_drop_env(struct io_kiocb *req)
{
	if (req->work.mm) {
		mmdrop(req->work.mm);
		req->work.mm = NULL;
	}
	if (req->work.creds) {
		put_cred(req->work.creds);
		req->work.creds = NULL;
	}
	if (req->work.fs) {
		struct fs_struct *fs = req->work.fs;

		spin_lock(&req->work.fs->lock);
		if (--fs->users)
			fs = NULL;
		spin_unlock(&req->work.fs->lock);
		if (fs)
			free_fs_struct(fs);
	}
}

static inline void io_prep_async_work(struct io_kiocb *req,
				      struct io_kiocb **link)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];

	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file)
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	io_req_work_grab_env(req, def);

	*link = io_prep_linked_timeout(req);
}

static inline void io_queue_async_work(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link;

	io_prep_async_work(req, &link);

	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
					&req->work, req->flags);
	io_wq_enqueue(ctx->io_wq, &req->work);

	if (link)
		io_queue_linked_timeout(link);
}

static void io_kill_timeout(struct io_kiocb *req)
{
	int ret;

	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
	if (ret != -1) {
		atomic_inc(&req->ctx->cq_timeouts);
		list_del_init(&req->list);
		io_cqring_fill_event(req, 0);
		io_put_req(req);
	}
}

static void io_kill_timeouts(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req, *tmp;

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
		io_kill_timeout(req);
	spin_unlock_irq(&ctx->completion_lock);
}

static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	while ((req = io_get_timeout_req(ctx)) != NULL)
		io_kill_timeout(req);

	__io_commit_cqring(ctx);

	while ((req = io_get_deferred_req(ctx)) != NULL)
		io_queue_async_work(req);
}

static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail;

	tail = ctx->cached_cq_tail;
	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
		return NULL;

	ctx->cached_cq_tail++;
	return &rings->cqes[tail & ctx->cq_mask];
}

static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
	if (!ctx->cq_ev_fd)
		return false;
	if (!ctx->eventfd_async)
		return true;
	return io_wq_current_is_worker();
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	if (waitqueue_active(&ctx->sqo_wait))
		wake_up(&ctx->sqo_wait);
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
}

/* Returns true if there are no backlogged entries after the flush */
static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
	struct io_rings *rings = ctx->rings;
	struct io_uring_cqe *cqe;
	struct io_kiocb *req;
	unsigned long flags;
	LIST_HEAD(list);

	if (!force) {
		if (list_empty_careful(&ctx->cq_overflow_list))
			return true;
		if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
		    rings->cq_ring_entries))
			return false;
	}

	spin_lock_irqsave(&ctx->completion_lock, flags);

	/* if force is set, the ring is going away. always drop after that */
	if (force)
		ctx->cq_overflow_flushed = 1;

	cqe = NULL;
	while (!list_empty(&ctx->cq_overflow_list)) {
		cqe = io_get_cqring(ctx);
		if (!cqe && !force)
			break;

		req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
						list);
		list_move(&req->list, &list);
		req->flags &= ~REQ_F_OVERFLOW;
		if (cqe) {
			WRITE_ONCE(cqe->user_data, req->user_data);
			WRITE_ONCE(cqe->res, req->result);
			WRITE_ONCE(cqe->flags, req->cflags);
		} else {
			WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
		}
	}

	io_commit_cqring(ctx);
	if (cqe) {
		clear_bit(0, &ctx->sq_check_overflow);
		clear_bit(0, &ctx->cq_check_overflow);
	}
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);

	while (!list_empty(&list)) {
		req = list_first_entry(&list, struct io_kiocb, list);
		list_del(&req->list);
		io_put_req(req);
	}

	return cqe != NULL;
}

static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, req->user_data, res);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (likely(cqe)) {
		WRITE_ONCE(cqe->user_data, req->user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
	} else if (ctx->cq_overflow_flushed) {
		WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
	} else {
		if (list_empty(&ctx->cq_overflow_list)) {
			set_bit(0, &ctx->sq_check_overflow);
			set_bit(0, &ctx->cq_check_overflow);
		}
		req->flags |= REQ_F_OVERFLOW;
		refcount_inc(&req->refs);
		req->result = res;
		req->cflags = cflags;
		list_add_tail(&req->list, &ctx->cq_overflow_list);
	}
}

static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
	__io_cqring_fill_event(req, res, 0);
}

static void __io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	__io_cqring_fill_event(req, res, cflags);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
}

static void io_cqring_add_event(struct io_kiocb *req, long res)
{
	__io_cqring_add_event(req, res, 0);
}

static inline bool io_is_fallback_req(struct io_kiocb *req)
{
	return req == (struct io_kiocb *)
		((unsigned long) req->ctx->fallback_req & ~1UL);
}

static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = ctx->fallback_req;
	if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
		return req;

	return NULL;
}

static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
				   struct io_submit_state *state)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct io_kiocb *req;

	if (!state) {
		req = kmem_cache_alloc(req_cachep, gfp);
		if (unlikely(!req))
			goto fallback;
	} else if (!state->free_reqs) {
		size_t sz;
		int ret;

		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);

		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		if (unlikely(ret <= 0)) {
			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!state->reqs[0])
				goto fallback;
			ret = 1;
		}
		state->free_reqs = ret - 1;
		req = state->reqs[ret - 1];
	} else {
		state->free_reqs--;
		req = state->reqs[state->free_reqs];
	}

got_it:
	req->io = NULL;
	req->file = NULL;
	req->ctx = ctx;
	req->flags = 0;
	/* one is dropped after submission, the other at completion */
	refcount_set(&req->refs, 2);
	req->result = 0;
	INIT_IO_WORK(&req->work, io_wq_submit_work);
	return req;
fallback:
	req = io_get_fallback_req(ctx);
	if (req)
		goto got_it;
	percpu_ref_put(&ctx->refs);
	return NULL;
}

static inline void io_put_file(struct io_kiocb *req, struct file *file,
			       bool fixed)
{
	if (fixed)
		percpu_ref_put(&req->ctx->file_data->refs);
	else
		fput(file);
}

static void __io_req_do_free(struct io_kiocb *req)
{
	if (likely(!io_is_fallback_req(req)))
		kmem_cache_free(req_cachep, req);
	else
		clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
}

static void __io_req_aux_free(struct io_kiocb *req)
{
	if (req->flags & REQ_F_NEED_CLEANUP)
		io_cleanup_req(req);

	kfree(req->io);
	if (req->file)
		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));

	io_req_work_drop_env(req);
}

static void __io_free_req(struct io_kiocb *req)
{
	__io_req_aux_free(req);

	if (req->flags & REQ_F_INFLIGHT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		list_del(&req->inflight_entry);
		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
	}

	percpu_ref_put(&req->ctx->refs);
	__io_req_do_free(req);
}

struct req_batch {
	void *reqs[IO_IOPOLL_BATCH];
	int to_free;
	int need_iter;
};

static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
{
	int fixed_refs = rb->to_free;

	if (!rb->to_free)
		return;
	if (rb->need_iter) {
		int i, inflight = 0;
		unsigned long flags;

		fixed_refs = 0;
		for (i = 0; i < rb->to_free; i++) {
			struct io_kiocb *req = rb->reqs[i];

			if (req->flags & REQ_F_FIXED_FILE) {
				req->file = NULL;
				fixed_refs++;
			}
			if (req->flags & REQ_F_INFLIGHT)
				inflight++;
			__io_req_aux_free(req);
		}
		if (!inflight)
			goto do_free;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		for (i = 0; i < rb->to_free; i++) {
			struct io_kiocb *req = rb->reqs[i];

			if (req->flags & REQ_F_INFLIGHT) {
				list_del(&req->inflight_entry);
				if (!--inflight)
					break;
			}
		}
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);

		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
	}
do_free:
	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
	if (fixed_refs)
		percpu_ref_put_many(&ctx->file_data->refs, fixed_refs);
	percpu_ref_put_many(&ctx->refs, rb->to_free);
	rb->to_free = rb->need_iter = 0;
}

static bool io_link_cancel_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
	if (ret != -1) {
		io_cqring_fill_event(req, -ECANCELED);
		io_commit_cqring(ctx);
		req->flags &= ~REQ_F_LINK;
		io_put_req(req);
		return true;
	}

	return false;
}

static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	struct io_ring_ctx *ctx = req->ctx;
	bool wake_ev = false;

	/* Already got next link */
	if (req->flags & REQ_F_LINK_NEXT)
		return;

	/*
	 * The list should never be empty when we are called here. But could
	 * potentially happen if the chain is messed up, check to be on the
	 * safe side.
	 */
	while (!list_empty(&req->link_list)) {
		struct io_kiocb *nxt = list_first_entry(&req->link_list,
						struct io_kiocb, link_list);

		if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) &&
			     (nxt->flags & REQ_F_TIMEOUT))) {
			list_del_init(&nxt->link_list);
			wake_ev |= io_link_cancel_timeout(nxt);
			req->flags &= ~REQ_F_LINK_TIMEOUT;
			continue;
		}

		list_del_init(&req->link_list);
		if (!list_empty(&nxt->link_list))
			nxt->flags |= REQ_F_LINK;
		*nxtptr = nxt;
		break;
	}

	req->flags |= REQ_F_LINK_NEXT;
	if (wake_ev)
		io_cqring_ev_posted(ctx);
}

/*
 * Called if REQ_F_LINK is set, and we fail the head request
 */
static void io_fail_links(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);

	while (!list_empty(&req->link_list)) {
		struct io_kiocb *link = list_first_entry(&req->link_list,
						struct io_kiocb, link_list);

		list_del_init(&link->link_list);
		trace_io_uring_fail_link(req, link);

		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
		    link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_link_cancel_timeout(link);
		} else {
			io_cqring_fill_event(link, -ECANCELED);
			__io_double_put_req(link);
		}
		req->flags &= ~REQ_F_LINK_TIMEOUT;
	}

	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);
}

static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
{
	if (likely(!(req->flags & REQ_F_LINK)))
		return;

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (req->flags & REQ_F_FAIL_LINK) {
		io_fail_links(req);
	} else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
			REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		/*
		 * If this is a timeout link, we could be racing with the
		 * timeout timer. Grab the completion lock for this case to
		 * protect against that.
		 */
		spin_lock_irqsave(&ctx->completion_lock, flags);
		io_req_link_next(req, nxt);
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
	} else {
		io_req_link_next(req, nxt);
	}
}

static void io_free_req(struct io_kiocb *req)
{
	struct io_kiocb *nxt = NULL;

	io_req_find_next(req, &nxt);
	__io_free_req(req);

	if (nxt)
		io_queue_async_work(nxt);
}

static void io_link_work_cb(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *link;

	link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
	io_queue_linked_timeout(link);
	io_wq_submit_work(workptr);
}

static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
{
	struct io_kiocb *link;
	const struct io_op_def *def = &io_op_defs[nxt->opcode];

	if ((nxt->flags & REQ_F_ISREG) && def->hash_reg_file)
		io_wq_hash_work(&nxt->work, file_inode(nxt->file));

	*workptr = &nxt->work;
	link = io_prep_linked_timeout(nxt);
	if (link)
		nxt->work.func = io_link_work_cb;
}

/*
 * Drop reference to request, return next in chain (if there is one) if this
 * was the last reference to this request.
 */
__attribute__((nonnull))
static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	if (refcount_dec_and_test(&req->refs)) {
		io_req_find_next(req, nxtptr);
		__io_free_req(req);
	}
}

static void io_put_req(struct io_kiocb *req)
{
	if (refcount_dec_and_test(&req->refs))
		io_free_req(req);
}

static void io_steal_work(struct io_kiocb *req,
			  struct io_wq_work **workptr)
{
	/*
	 * It's in an io-wq worker, so there always should be at least
	 * one reference, which will be dropped in io_put_work() just
	 * after the current handler returns.
	 *
	 * It also means, that if the counter dropped to 1, then there is
	 * no asynchronous users left, so it's safe to steal the next work.
	 */
	if (refcount_read(&req->refs) == 1) {
		struct io_kiocb *nxt = NULL;

		io_req_find_next(req, &nxt);
		if (nxt)
			io_wq_assign_next(workptr, nxt);
	}
}

/*
 * Must only be used if we don't need to care about links, usually from
 * within the completion handling itself.
 */
static void __io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		__io_free_req(req);
}

static void io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		io_free_req(req);
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
{
	struct io_rings *rings = ctx->rings;

	if (test_bit(0, &ctx->cq_check_overflow)) {
		/*
		 * noflush == true is from the waitqueue handler, just ensure
		 * we wake up the task, and the next invocation will flush the
		 * entries. We cannot safely do it from here.
		 */
		if (noflush && !list_empty(&ctx->cq_overflow_list))
			return -1U;

		io_cqring_overflow_flush(ctx, false);
	}

1d7bb1d5 1664
a3a0e43f
JA
1665 /* See comment at the top of this file */
1666 smp_rmb();
ad3eb2c8 1667 return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
a3a0e43f
JA
1668}
1669
fb5ccc98
PB
1670static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
1671{
1672 struct io_rings *rings = ctx->rings;
1673
1674 /* make sure SQ entry isn't read before tail */
1675 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
1676}
1677
8237e045 1678static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
e94f141b 1679{
c6ca97b3
JA
1680 if ((req->flags & REQ_F_LINK) || io_is_fallback_req(req))
1681 return false;
e94f141b 1682
c6ca97b3
JA
1683 if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
1684 rb->need_iter++;
1685
1686 rb->reqs[rb->to_free++] = req;
1687 if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
1688 io_free_req_many(req->ctx, rb);
1689 return true;
e94f141b
JA
1690}
1691
bcda7baa
JA
1692static int io_put_kbuf(struct io_kiocb *req)
1693{
4d954c25 1694 struct io_buffer *kbuf;
bcda7baa
JA
1695 int cflags;
1696
4d954c25 1697 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
bcda7baa
JA
1698 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
1699 cflags |= IORING_CQE_F_BUFFER;
1700 req->rw.addr = 0;
1701 kfree(kbuf);
1702 return cflags;
1703}
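/*
 * Illustrative sketch only (an addition, not original): userspace undoes
 * the encoding above to recover the provided-buffer id from a completion:
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER) {
 *		__u16 bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		// buffer 'bid' from the request's group now holds the data
 *	}
 */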
1704
def596e9
JA
1705/*
1706 * Find and free completed poll iocbs
1707 */
1708static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
1709 struct list_head *done)
1710{
8237e045 1711 struct req_batch rb;
def596e9 1712 struct io_kiocb *req;
def596e9 1713
c6ca97b3 1714 rb.to_free = rb.need_iter = 0;
def596e9 1715 while (!list_empty(done)) {
bcda7baa
JA
1716 int cflags = 0;
1717
def596e9
JA
1718 req = list_first_entry(done, struct io_kiocb, list);
1719 list_del(&req->list);
1720
bcda7baa
JA
1721 if (req->flags & REQ_F_BUFFER_SELECTED)
1722 cflags = io_put_kbuf(req);
1723
1724 __io_cqring_fill_event(req, req->result, cflags);
def596e9
JA
1725 (*nr_events)++;
1726
8237e045
JA
1727 if (refcount_dec_and_test(&req->refs) &&
1728 !io_req_multi_free(&rb, req))
1729 io_free_req(req);
def596e9 1730 }
def596e9 1731
09bb8394 1732 io_commit_cqring(ctx);
32b2244a
XW
1733 if (ctx->flags & IORING_SETUP_SQPOLL)
1734 io_cqring_ev_posted(ctx);
8237e045 1735 io_free_req_many(ctx, &rb);
def596e9
JA
1736}
1737
1738static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
1739 long min)
1740{
1741 struct io_kiocb *req, *tmp;
1742 LIST_HEAD(done);
1743 bool spin;
1744 int ret;
1745
1746 /*
1747 * Only spin for completions if we don't have multiple devices hanging
1748 * off our complete list, and we're under the requested amount.
1749 */
1750 spin = !ctx->poll_multi_file && *nr_events < min;
1751
1752 ret = 0;
1753 list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
9adbd45d 1754 struct kiocb *kiocb = &req->rw.kiocb;
def596e9
JA
1755
1756 /*
1757 * Move completed entries to our local list. If we find a
1758 * request that requires polling, break out and complete
1759 * the done list first, if we have entries there.
1760 */
1761 if (req->flags & REQ_F_IOPOLL_COMPLETED) {
1762 list_move_tail(&req->list, &done);
1763 continue;
1764 }
1765 if (!list_empty(&done))
1766 break;
1767
1768 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
1769 if (ret < 0)
1770 break;
1771
1772 if (ret && spin)
1773 spin = false;
1774 ret = 0;
1775 }
1776
1777 if (!list_empty(&done))
1778 io_iopoll_complete(ctx, nr_events, &done);
1779
1780 return ret;
1781}
1782
1783/*
d195a66e 1784 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
def596e9
JA
1785 * non-spinning poll check - we'll still enter the driver poll loop, but only
1786 * as a non-spinning completion check.
1787 */
1788static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
1789 long min)
1790{
08f5439f 1791 while (!list_empty(&ctx->poll_list) && !need_resched()) {
def596e9
JA
1792 int ret;
1793
1794 ret = io_do_iopoll(ctx, nr_events, min);
1795 if (ret < 0)
1796 return ret;
1797 if (!min || *nr_events >= min)
1798 return 0;
1799 }
1800
1801 return 1;
1802}
1803
1804/*
1805 * We can't just wait for polled events to come to us, we have to actively
1806 * find and complete them.
1807 */
1808static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
1809{
1810 if (!(ctx->flags & IORING_SETUP_IOPOLL))
1811 return;
1812
1813 mutex_lock(&ctx->uring_lock);
1814 while (!list_empty(&ctx->poll_list)) {
1815 unsigned int nr_events = 0;
1816
1817 io_iopoll_getevents(ctx, &nr_events, 1);
08f5439f
JA
1818
1819 /*
1820 * Ensure we allow local-to-the-cpu processing to take place,
1821 * in this case we need to ensure that we reap all events.
1822 */
1823 cond_resched();
def596e9
JA
1824 }
1825 mutex_unlock(&ctx->uring_lock);
1826}
1827
c7849be9
XW
1828static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
1829 long min)
def596e9 1830{
2b2ed975 1831 int iters = 0, ret = 0;
500f9fba 1832
c7849be9
XW
1833 /*
1834 * We disallow the app entering submit/complete with polling, but we
1835 * still need to lock the ring to prevent racing with polled issue
1836 * that got punted to a workqueue.
1837 */
1838 mutex_lock(&ctx->uring_lock);
def596e9
JA
1839 do {
1840 int tmin = 0;
1841
a3a0e43f
JA
1842 /*
1843 * Don't enter poll loop if we already have events pending.
1844 * If we do, we can potentially be spinning for commands that
1845 * already triggered a CQE (e.g. in error).
1846 */
1d7bb1d5 1847 if (io_cqring_events(ctx, false))
a3a0e43f
JA
1848 break;
1849
500f9fba
JA
1850 /*
1851 * If a submit got punted to a workqueue, we can have the
1852 * application entering polling for a command before it gets
1853 * issued. That app will hold the uring_lock for the duration
1854 * of the poll right here, so we need to take a breather every
1855 * now and then to ensure that the issue has a chance to add
1856 * the poll to the issued list. Otherwise we can spin here
1857 * forever, while the workqueue is stuck trying to acquire the
1858 * very same mutex.
1859 */
1860 if (!(++iters & 7)) {
1861 mutex_unlock(&ctx->uring_lock);
1862 mutex_lock(&ctx->uring_lock);
1863 }
1864
def596e9
JA
1865 if (*nr_events < min)
1866 tmin = min - *nr_events;
1867
1868 ret = io_iopoll_getevents(ctx, nr_events, tmin);
1869 if (ret <= 0)
1870 break;
1871 ret = 0;
1872 } while (min && !*nr_events && !need_resched());
1873
500f9fba 1874 mutex_unlock(&ctx->uring_lock);
def596e9
JA
1875 return ret;
1876}
1877
491381ce 1878static void kiocb_end_write(struct io_kiocb *req)
2b188cc1 1879{
491381ce
JA
1880 /*
1881 * Tell lockdep we inherited freeze protection from submission
1882 * thread.
1883 */
1884 if (req->flags & REQ_F_ISREG) {
1885 struct inode *inode = file_inode(req->file);
2b188cc1 1886
491381ce 1887 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
2b188cc1 1888 }
491381ce 1889 file_end_write(req->file);
2b188cc1
JA
1890}
1891
4e88d6e7
JA
1892static inline void req_set_fail_links(struct io_kiocb *req)
1893{
1894 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
1895 req->flags |= REQ_F_FAIL_LINK;
1896}
1897
ba816ad6 1898static void io_complete_rw_common(struct kiocb *kiocb, long res)
2b188cc1 1899{
9adbd45d 1900 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
bcda7baa 1901 int cflags = 0;
2b188cc1 1902
491381ce
JA
1903 if (kiocb->ki_flags & IOCB_WRITE)
1904 kiocb_end_write(req);
2b188cc1 1905
4e88d6e7
JA
1906 if (res != req->result)
1907 req_set_fail_links(req);
bcda7baa
JA
1908 if (req->flags & REQ_F_BUFFER_SELECTED)
1909 cflags = io_put_kbuf(req);
1910 __io_cqring_add_event(req, res, cflags);
ba816ad6
JA
1911}
1912
1913static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
1914{
9adbd45d 1915 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
ba816ad6
JA
1916
1917 io_complete_rw_common(kiocb, res);
e65ef56d 1918 io_put_req(req);
2b188cc1
JA
1919}
1920
def596e9
JA
1921static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
1922{
9adbd45d 1923 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
def596e9 1924
491381ce
JA
1925 if (kiocb->ki_flags & IOCB_WRITE)
1926 kiocb_end_write(req);
def596e9 1927
4e88d6e7
JA
1928 if (res != req->result)
1929 req_set_fail_links(req);
9e645e11 1930 req->result = res;
def596e9
JA
1931 if (res != -EAGAIN)
1932 req->flags |= REQ_F_IOPOLL_COMPLETED;
1933}
1934
1935/*
1936 * After the iocb has been issued, it's safe to be found on the poll list.
1937 * Adding the kiocb to the list AFTER submission ensures that we don't
1938 * find it from an io_iopoll_getevents() thread before the issuer is done
1939 * accessing the kiocb cookie.
1940 */
1941static void io_iopoll_req_issued(struct io_kiocb *req)
1942{
1943 struct io_ring_ctx *ctx = req->ctx;
1944
1945 /*
1946 * Track whether we have multiple files in our lists. This will impact
1947 * how we do polling eventually, not spinning if we're on potentially
1948 * different devices.
1949 */
1950 if (list_empty(&ctx->poll_list)) {
1951 ctx->poll_multi_file = false;
1952 } else if (!ctx->poll_multi_file) {
1953 struct io_kiocb *list_req;
1954
1955 list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
1956 list);
9adbd45d 1957 if (list_req->file != req->file)
def596e9
JA
1958 ctx->poll_multi_file = true;
1959 }
1960
1961 /*
1962 * For fast devices, IO may have already completed. If it has, add
1963 * it to the front so we find it first.
1964 */
1965 if (req->flags & REQ_F_IOPOLL_COMPLETED)
1966 list_add(&req->list, &ctx->poll_list);
1967 else
1968 list_add_tail(&req->list, &ctx->poll_list);
bdcd3eab
XW
1969
1970 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
1971 wq_has_sleeper(&ctx->sqo_wait))
1972 wake_up(&ctx->sqo_wait);
def596e9
JA
1973}
1974
3d6770fb 1975static void io_file_put(struct io_submit_state *state)
9a56a232 1976{
3d6770fb 1977 if (state->file) {
9a56a232
JA
1978 int diff = state->has_refs - state->used_refs;
1979
1980 if (diff)
1981 fput_many(state->file, diff);
1982 state->file = NULL;
1983 }
1984}
1985
1986/*
1987 * Get as many references to a file as we have IOs left in this submission,
1988 * assuming most submissions are for one file, or at least that each file
1989 * has more than one submission.
1990 */
8da11c19 1991static struct file *__io_file_get(struct io_submit_state *state, int fd)
9a56a232
JA
1992{
1993 if (!state)
1994 return fget(fd);
1995
1996 if (state->file) {
1997 if (state->fd == fd) {
1998 state->used_refs++;
1999 state->ios_left--;
2000 return state->file;
2001 }
3d6770fb 2002 io_file_put(state);
9a56a232
JA
2003 }
2004 state->file = fget_many(fd, state->ios_left);
2005 if (!state->file)
2006 return NULL;
2007
2008 state->fd = fd;
2009 state->has_refs = state->ios_left;
2010 state->used_refs = 1;
2011 state->ios_left--;
2012 return state->file;
2013}
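/*
 * Worked example (editorial): with 8 SQEs left in a submission that all hit
 * the same fd, the first lookup does fget_many(fd, 8), leaving has_refs == 8
 * and used_refs == 1; each later hit on the cached file only bumps used_refs.
 * io_file_put() then drops the has_refs - used_refs references that were
 * never handed out.
 */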
2014
2b188cc1
JA
2015/*
2016 * If we tracked the file through the SCM inflight mechanism, we could support
2017 * any file. For now, just ensure that anything potentially problematic is done
2018 * inline.
2019 */
2020static bool io_file_supports_async(struct file *file)
2021{
2022 umode_t mode = file_inode(file)->i_mode;
2023
10d59345 2024 if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode))
2b188cc1
JA
2025 return true;
2026 if (S_ISREG(mode) && file->f_op != &io_uring_fops)
2027 return true;
2028
2029 return false;
2030}
2031
3529d8c2
JA
2032static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2033 bool force_nonblock)
2b188cc1 2034{
def596e9 2035 struct io_ring_ctx *ctx = req->ctx;
9adbd45d 2036 struct kiocb *kiocb = &req->rw.kiocb;
09bb8394
JA
2037 unsigned ioprio;
2038 int ret;
2b188cc1 2039
491381ce
JA
2040 if (S_ISREG(file_inode(req->file)->i_mode))
2041 req->flags |= REQ_F_ISREG;
2042
2b188cc1 2043 kiocb->ki_pos = READ_ONCE(sqe->off);
ba04291e
JA
2044 if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
2045 req->flags |= REQ_F_CUR_POS;
2046 kiocb->ki_pos = req->file->f_pos;
2047 }
2b188cc1 2048 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
3e577dcd
PB
2049 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2050 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2051 if (unlikely(ret))
2052 return ret;
2b188cc1
JA
2053
2054 ioprio = READ_ONCE(sqe->ioprio);
2055 if (ioprio) {
2056 ret = ioprio_check_cap(ioprio);
2057 if (ret)
09bb8394 2058 return ret;
2b188cc1
JA
2059
2060 kiocb->ki_ioprio = ioprio;
2061 } else
2062 kiocb->ki_ioprio = get_current_ioprio();
2063
8449eeda 2064 /* don't allow async punt if RWF_NOWAIT was requested */
491381ce
JA
2065 if ((kiocb->ki_flags & IOCB_NOWAIT) ||
2066 (req->file->f_flags & O_NONBLOCK))
8449eeda
SB
2067 req->flags |= REQ_F_NOWAIT;
2068
2069 if (force_nonblock)
2b188cc1 2070 kiocb->ki_flags |= IOCB_NOWAIT;
8449eeda 2071
def596e9 2072 if (ctx->flags & IORING_SETUP_IOPOLL) {
def596e9
JA
2073 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2074 !kiocb->ki_filp->f_op->iopoll)
09bb8394 2075 return -EOPNOTSUPP;
2b188cc1 2076
def596e9
JA
2077 kiocb->ki_flags |= IOCB_HIPRI;
2078 kiocb->ki_complete = io_complete_rw_iopoll;
6873e0bd 2079 req->result = 0;
def596e9 2080 } else {
09bb8394
JA
2081 if (kiocb->ki_flags & IOCB_HIPRI)
2082 return -EINVAL;
def596e9
JA
2083 kiocb->ki_complete = io_complete_rw;
2084 }
9adbd45d 2085
3529d8c2
JA
2086 req->rw.addr = READ_ONCE(sqe->addr);
2087 req->rw.len = READ_ONCE(sqe->len);
bcda7baa 2088 /* we own ->private, reuse it for the buffer index / buffer ID */
9adbd45d 2089 req->rw.kiocb.private = (void *) (unsigned long)
3529d8c2 2090 READ_ONCE(sqe->buf_index);
2b188cc1 2091 return 0;
2b188cc1
JA
2092}
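/*
 * Editorial note: the buf_index stashed in kiocb.private above does double
 * duty. io_import_fixed() reads it back as the registered-buffer index, while
 * io_rw_buffer_select() treats it as the buffer group ID when
 * REQ_F_BUFFER_SELECT is set.
 */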
2093
2094static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2095{
2096 switch (ret) {
2097 case -EIOCBQUEUED:
2098 break;
2099 case -ERESTARTSYS:
2100 case -ERESTARTNOINTR:
2101 case -ERESTARTNOHAND:
2102 case -ERESTART_RESTARTBLOCK:
2103 /*
2104 * We can't just restart the syscall, since previously
2105 * submitted sqes may already be in progress. Just fail this
2106 * IO with EINTR.
2107 */
2108 ret = -EINTR;
2109 /* fall through */
2110 default:
2111 kiocb->ki_complete(kiocb, ret, 0);
2112 }
2113}
2114
014db007 2115static void kiocb_done(struct kiocb *kiocb, ssize_t ret)
ba816ad6 2116{
ba04291e
JA
2117 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2118
2119 if (req->flags & REQ_F_CUR_POS)
2120 req->file->f_pos = kiocb->ki_pos;
bcaec089 2121 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
014db007 2122 io_complete_rw(kiocb, ret, 0);
ba816ad6
JA
2123 else
2124 io_rw_done(kiocb, ret);
2125}
2126
9adbd45d 2127static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
7d009165 2128 struct iov_iter *iter)
edafccee 2129{
9adbd45d
JA
2130 struct io_ring_ctx *ctx = req->ctx;
2131 size_t len = req->rw.len;
edafccee
JA
2132 struct io_mapped_ubuf *imu;
2133 unsigned index, buf_index;
2134 size_t offset;
2135 u64 buf_addr;
2136
2137 /* attempt to use fixed buffers without having provided iovecs */
2138 if (unlikely(!ctx->user_bufs))
2139 return -EFAULT;
2140
9adbd45d 2141 buf_index = (unsigned long) req->rw.kiocb.private;
edafccee
JA
2142 if (unlikely(buf_index >= ctx->nr_user_bufs))
2143 return -EFAULT;
2144
2145 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2146 imu = &ctx->user_bufs[index];
9adbd45d 2147 buf_addr = req->rw.addr;
edafccee
JA
2148
2149 /* overflow */
2150 if (buf_addr + len < buf_addr)
2151 return -EFAULT;
2152 /* not inside the mapped region */
2153 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
2154 return -EFAULT;
2155
2156 /*
2157 * May not be the start of the buffer; set the size appropriately
2158 * and advance the iterator to where the data starts.
2159 */
2160 offset = buf_addr - imu->ubuf;
2161 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bd11b3a3
JA
2162
2163 if (offset) {
2164 /*
2165 * Don't use iov_iter_advance() here, as it's really slow for
2166 * using the latter parts of a big fixed buffer - it iterates
2167 * over each segment manually. We can cheat a bit here, because
2168 * we know that:
2169 *
2170 * 1) it's a BVEC iter, we set it up
2171 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2172 * first and last bvec
2173 *
2174 * So just find our index, and adjust the iterator afterwards.
2175 * If the offset is within the first bvec (or the whole first
2176 * bvec), just use iov_iter_advance(). This makes it easier
2177 * since we can just skip the first segment, which may not
2178 * be PAGE_SIZE aligned.
2179 */
2180 const struct bio_vec *bvec = imu->bvec;
2181
2182 if (offset <= bvec->bv_len) {
2183 iov_iter_advance(iter, offset);
2184 } else {
2185 unsigned long seg_skip;
2186
2187 /* skip first vec */
2188 offset -= bvec->bv_len;
2189 seg_skip = 1 + (offset >> PAGE_SHIFT);
2190
2191 iter->bvec = bvec + seg_skip;
2192 iter->nr_segs -= seg_skip;
99c79f66 2193 iter->count -= bvec->bv_len + offset;
bd11b3a3 2194 iter->iov_offset = offset & ~PAGE_MASK;
bd11b3a3
JA
2195 }
2196 }
2197
5e559561 2198 return len;
edafccee
JA
2199}
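/*
 * Worked example (editorial, assuming a page-aligned registered buffer and
 * 4K pages): a buf_addr 10K into the mapping gives offset = 10240. That is
 * larger than the first bvec (4096), so offset drops to 6144, seg_skip is
 * 1 + (6144 >> PAGE_SHIFT) == 2, iov_offset becomes 6144 & ~PAGE_MASK == 2048,
 * and count is trimmed by the 10240 bytes skipped in total.
 */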
2200
bcda7baa
JA
2201static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2202{
2203 if (needs_lock)
2204 mutex_unlock(&ctx->uring_lock);
2205}
2206
2207static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2208{
2209 /*
2210 * "Normal" inline submissions always hold the uring_lock, since we
2211 * grab it from the system call. Same is true for the SQPOLL offload.
2212 * The only exception is when we've detached the request and issue it
2213 * from an async worker thread; grab the lock in that case.
2214 */
2215 if (needs_lock)
2216 mutex_lock(&ctx->uring_lock);
2217}
2218
2219static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2220 int bgid, struct io_buffer *kbuf,
2221 bool needs_lock)
2222{
2223 struct io_buffer *head;
2224
2225 if (req->flags & REQ_F_BUFFER_SELECTED)
2226 return kbuf;
2227
2228 io_ring_submit_lock(req->ctx, needs_lock);
2229
2230 lockdep_assert_held(&req->ctx->uring_lock);
2231
2232 head = idr_find(&req->ctx->io_buffer_idr, bgid);
2233 if (head) {
2234 if (!list_empty(&head->list)) {
2235 kbuf = list_last_entry(&head->list, struct io_buffer,
2236 list);
2237 list_del(&kbuf->list);
2238 } else {
2239 kbuf = head;
2240 idr_remove(&req->ctx->io_buffer_idr, bgid);
2241 }
2242 if (*len > kbuf->len)
2243 *len = kbuf->len;
2244 } else {
2245 kbuf = ERR_PTR(-ENOBUFS);
2246 }
2247
2248 io_ring_submit_unlock(req->ctx, needs_lock);
2249
2250 return kbuf;
2251}
2252
4d954c25
JA
2253static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2254 bool needs_lock)
2255{
2256 struct io_buffer *kbuf;
2257 int bgid;
2258
2259 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2260 bgid = (int) (unsigned long) req->rw.kiocb.private;
2261 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2262 if (IS_ERR(kbuf))
2263 return kbuf;
2264 req->rw.addr = (u64) (unsigned long) kbuf;
2265 req->flags |= REQ_F_BUFFER_SELECTED;
2266 return u64_to_user_ptr(kbuf->addr);
2267}
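/*
 * Editorial note: after selection, req->rw.addr no longer holds a user
 * address; it carries the struct io_buffer pointer and REQ_F_BUFFER_SELECTED
 * records that, presumably so io_put_kbuf() can recover the buffer ID when
 * the completion is posted.
 */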
2268
2269#ifdef CONFIG_COMPAT
2270static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2271 bool needs_lock)
2272{
2273 struct compat_iovec __user *uiov;
2274 compat_ssize_t clen;
2275 void __user *buf;
2276 ssize_t len;
2277
2278 uiov = u64_to_user_ptr(req->rw.addr);
2279 if (!access_ok(uiov, sizeof(*uiov)))
2280 return -EFAULT;
2281 if (__get_user(clen, &uiov->iov_len))
2282 return -EFAULT;
2283 if (clen < 0)
2284 return -EINVAL;
2285
2286 len = clen;
2287 buf = io_rw_buffer_select(req, &len, needs_lock);
2288 if (IS_ERR(buf))
2289 return PTR_ERR(buf);
2290 iov[0].iov_base = buf;
2291 iov[0].iov_len = (compat_size_t) len;
2292 return 0;
2293}
2294#endif
2295
2296static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2297 bool needs_lock)
2298{
2299 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2300 void __user *buf;
2301 ssize_t len;
2302
2303 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2304 return -EFAULT;
2305
2306 len = iov[0].iov_len;
2307 if (len < 0)
2308 return -EINVAL;
2309 buf = io_rw_buffer_select(req, &len, needs_lock);
2310 if (IS_ERR(buf))
2311 return PTR_ERR(buf);
2312 iov[0].iov_base = buf;
2313 iov[0].iov_len = len;
2314 return 0;
2315}
2316
2317static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2318 bool needs_lock)
2319{
2320 if (req->flags & REQ_F_BUFFER_SELECTED)
2321 return 0;
2322 if (!req->rw.len)
2323 return 0;
2324 else if (req->rw.len > 1)
2325 return -EINVAL;
2326
2327#ifdef CONFIG_COMPAT
2328 if (req->ctx->compat)
2329 return io_compat_import(req, iov, needs_lock);
2330#endif
2331
2332 return __io_iov_buffer_select(req, iov, needs_lock);
2333}
2334
cf6fd4bd 2335static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
bcda7baa
JA
2336 struct iovec **iovec, struct iov_iter *iter,
2337 bool needs_lock)
2b188cc1 2338{
9adbd45d
JA
2339 void __user *buf = u64_to_user_ptr(req->rw.addr);
2340 size_t sqe_len = req->rw.len;
4d954c25 2341 ssize_t ret;
edafccee
JA
2342 u8 opcode;
2343
d625c6ee 2344 opcode = req->opcode;
7d009165 2345 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
edafccee 2346 *iovec = NULL;
9adbd45d 2347 return io_import_fixed(req, rw, iter);
edafccee 2348 }
2b188cc1 2349
bcda7baa
JA
2350 /* buffer index only valid with fixed read/write, or buffer select */
2351 if (req->rw.kiocb.private && !(req->flags & REQ_F_BUFFER_SELECT))
9adbd45d
JA
2352 return -EINVAL;
2353
3a6820f2 2354 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
bcda7baa 2355 if (req->flags & REQ_F_BUFFER_SELECT) {
4d954c25
JA
2356 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
2357 if (IS_ERR(buf)) {
bcda7baa 2358 *iovec = NULL;
4d954c25 2359 return PTR_ERR(buf);
bcda7baa 2360 }
3f9d6441 2361 req->rw.len = sqe_len;
bcda7baa
JA
2362 }
2363
3a6820f2
JA
2364 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
2365 *iovec = NULL;
3a901598 2366 return ret < 0 ? ret : sqe_len;
3a6820f2
JA
2367 }
2368
f67676d1
JA
2369 if (req->io) {
2370 struct io_async_rw *iorw = &req->io->rw;
2371
2372 *iovec = iorw->iov;
2373 iov_iter_init(iter, rw, *iovec, iorw->nr_segs, iorw->size);
2374 if (iorw->iov == iorw->fast_iov)
2375 *iovec = NULL;
2376 return iorw->size;
2377 }
2378
4d954c25
JA
2379 if (req->flags & REQ_F_BUFFER_SELECT) {
2380 ret = io_iov_buffer_select(req, *iovec, needs_lock);
3f9d6441
JA
2381 if (!ret) {
2382 ret = (*iovec)->iov_len;
2383 iov_iter_init(iter, rw, *iovec, 1, ret);
2384 }
4d954c25
JA
2385 *iovec = NULL;
2386 return ret;
2387 }
2388
2b188cc1 2389#ifdef CONFIG_COMPAT
cf6fd4bd 2390 if (req->ctx->compat)
2b188cc1
JA
2391 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
2392 iovec, iter);
2393#endif
2394
2395 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
2396}
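/*
 * Editorial summary of the import precedence above: fixed read/write maps the
 * registered buffer via io_import_fixed(); IORING_OP_READ/WRITE import a
 * single range (optionally chosen through buffer select); an already-prepared
 * req->io iovec is reused as-is; buffer-select requests build a one-entry
 * iovec; everything else goes through the regular (compat_)import_iovec()
 * path.
 */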
2397
31b51510 2398/*
32960613
JA
2399 * For files that don't have ->read_iter() and ->write_iter(), handle them
2400 * by looping over ->read() or ->write() manually.
31b51510 2401 */
32960613
JA
2402static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
2403 struct iov_iter *iter)
2404{
2405 ssize_t ret = 0;
2406
2407 /*
2408 * Don't support polled IO through this interface, and we can't
2409 * support non-blocking either. For the latter, this just causes
2410 * the kiocb to be handled from an async context.
2411 */
2412 if (kiocb->ki_flags & IOCB_HIPRI)
2413 return -EOPNOTSUPP;
2414 if (kiocb->ki_flags & IOCB_NOWAIT)
2415 return -EAGAIN;
2416
2417 while (iov_iter_count(iter)) {
311ae9e1 2418 struct iovec iovec;
32960613
JA
2419 ssize_t nr;
2420
311ae9e1
PB
2421 if (!iov_iter_is_bvec(iter)) {
2422 iovec = iov_iter_iovec(iter);
2423 } else {
2424 /* fixed buffers import bvec */
2425 iovec.iov_base = kmap(iter->bvec->bv_page)
2426 + iter->iov_offset;
2427 iovec.iov_len = min(iter->count,
2428 iter->bvec->bv_len - iter->iov_offset);
2429 }
2430
32960613
JA
2431 if (rw == READ) {
2432 nr = file->f_op->read(file, iovec.iov_base,
2433 iovec.iov_len, &kiocb->ki_pos);
2434 } else {
2435 nr = file->f_op->write(file, iovec.iov_base,
2436 iovec.iov_len, &kiocb->ki_pos);
2437 }
2438
311ae9e1
PB
2439 if (iov_iter_is_bvec(iter))
2440 kunmap(iter->bvec->bv_page);
2441
32960613
JA
2442 if (nr < 0) {
2443 if (!ret)
2444 ret = nr;
2445 break;
2446 }
2447 ret += nr;
2448 if (nr != iovec.iov_len)
2449 break;
2450 iov_iter_advance(iter, nr);
2451 }
2452
2453 return ret;
2454}
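/*
 * Editorial note: matching the read/write loops this mimics, an error is only
 * returned if nothing has been transferred yet; otherwise the partial byte
 * count is returned, and a short ->read()/->write() ends the loop early.
 */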
2455
b7bb4f7d 2456static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
f67676d1
JA
2457 struct iovec *iovec, struct iovec *fast_iov,
2458 struct iov_iter *iter)
2459{
2460 req->io->rw.nr_segs = iter->nr_segs;
2461 req->io->rw.size = io_size;
2462 req->io->rw.iov = iovec;
2463 if (!req->io->rw.iov) {
2464 req->io->rw.iov = req->io->rw.fast_iov;
2465 memcpy(req->io->rw.iov, fast_iov,
2466 sizeof(struct iovec) * iter->nr_segs);
99bc4c38
PB
2467 } else {
2468 req->flags |= REQ_F_NEED_CLEANUP;
f67676d1
JA
2469 }
2470}
2471
b7bb4f7d 2472static int io_alloc_async_ctx(struct io_kiocb *req)
f67676d1 2473{
d3656344
JA
2474 if (!io_op_defs[req->opcode].async_ctx)
2475 return 0;
f67676d1 2476 req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
06b76d44 2477 return req->io == NULL;
b7bb4f7d
JA
2478}
2479
b7bb4f7d
JA
2480static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
2481 struct iovec *iovec, struct iovec *fast_iov,
2482 struct iov_iter *iter)
2483{
980ad263 2484 if (!io_op_defs[req->opcode].async_ctx)
74566df3 2485 return 0;
5d204bcf
JA
2486 if (!req->io) {
2487 if (io_alloc_async_ctx(req))
2488 return -ENOMEM;
b7bb4f7d 2489
5d204bcf
JA
2490 io_req_map_rw(req, io_size, iovec, fast_iov, iter);
2491 }
b7bb4f7d 2492 return 0;
f67676d1
JA
2493}
2494
3529d8c2
JA
2495static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2496 bool force_nonblock)
f67676d1 2497{
3529d8c2
JA
2498 struct io_async_ctx *io;
2499 struct iov_iter iter;
f67676d1
JA
2500 ssize_t ret;
2501
3529d8c2
JA
2502 ret = io_prep_rw(req, sqe, force_nonblock);
2503 if (ret)
2504 return ret;
f67676d1 2505
3529d8c2
JA
2506 if (unlikely(!(req->file->f_mode & FMODE_READ)))
2507 return -EBADF;
f67676d1 2508
5f798bea
PB
2509 /* either don't need iovec imported or already have it */
2510 if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
3529d8c2
JA
2511 return 0;
2512
2513 io = req->io;
2514 io->rw.iov = io->rw.fast_iov;
2515 req->io = NULL;
bcda7baa 2516 ret = io_import_iovec(READ, req, &io->rw.iov, &iter, !force_nonblock);
3529d8c2
JA
2517 req->io = io;
2518 if (ret < 0)
2519 return ret;
2520
2521 io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
2522 return 0;
f67676d1
JA
2523}
2524
014db007 2525static int io_read(struct io_kiocb *req, bool force_nonblock)
2b188cc1
JA
2526{
2527 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 2528 struct kiocb *kiocb = &req->rw.kiocb;
2b188cc1 2529 struct iov_iter iter;
31b51510 2530 size_t iov_count;
f67676d1 2531 ssize_t io_size, ret;
2b188cc1 2532
bcda7baa 2533 ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
06b76d44
JA
2534 if (ret < 0)
2535 return ret;
2b188cc1 2536
fd6c2e4c
JA
2537 /* Ensure we clear previously set non-block flag */
2538 if (!force_nonblock)
29de5f6a 2539 kiocb->ki_flags &= ~IOCB_NOWAIT;
fd6c2e4c 2540
797f3f53 2541 req->result = 0;
f67676d1 2542 io_size = ret;
9e645e11 2543 if (req->flags & REQ_F_LINK)
f67676d1
JA
2544 req->result = io_size;
2545
2546 /*
2547 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2548 * we know to async punt it even if it was opened O_NONBLOCK
2549 */
29de5f6a 2550 if (force_nonblock && !io_file_supports_async(req->file))
f67676d1 2551 goto copy_iov;
9e645e11 2552
31b51510 2553 iov_count = iov_iter_count(&iter);
9adbd45d 2554 ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
2b188cc1
JA
2555 if (!ret) {
2556 ssize_t ret2;
2557
9adbd45d
JA
2558 if (req->file->f_op->read_iter)
2559 ret2 = call_read_iter(req->file, kiocb, &iter);
32960613 2560 else
9adbd45d 2561 ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
32960613 2562
9d93a3f5 2563 /* Catch -EAGAIN return for forced non-blocking submission */
f67676d1 2564 if (!force_nonblock || ret2 != -EAGAIN) {
014db007 2565 kiocb_done(kiocb, ret2);
f67676d1
JA
2566 } else {
2567copy_iov:
b7bb4f7d 2568 ret = io_setup_async_rw(req, io_size, iovec,
f67676d1
JA
2569 inline_vecs, &iter);
2570 if (ret)
2571 goto out_free;
29de5f6a
JA
2572 /* any defer here is final, must do a blocking retry */
2573 if (!(req->flags & REQ_F_NOWAIT))
2574 req->flags |= REQ_F_MUST_PUNT;
f67676d1
JA
2575 return -EAGAIN;
2576 }
2b188cc1 2577 }
f67676d1 2578out_free:
1e95081c 2579 kfree(iovec);
99bc4c38 2580 req->flags &= ~REQ_F_NEED_CLEANUP;
2b188cc1
JA
2581 return ret;
2582}
2583
3529d8c2
JA
2584static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2585 bool force_nonblock)
f67676d1 2586{
3529d8c2
JA
2587 struct io_async_ctx *io;
2588 struct iov_iter iter;
f67676d1
JA
2589 ssize_t ret;
2590
3529d8c2
JA
2591 ret = io_prep_rw(req, sqe, force_nonblock);
2592 if (ret)
2593 return ret;
f67676d1 2594
3529d8c2
JA
2595 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
2596 return -EBADF;
f67676d1 2597
4ed734b0
JA
2598 req->fsize = rlimit(RLIMIT_FSIZE);
2599
5f798bea
PB
2600 /* either don't need iovec imported or already have it */
2601 if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
3529d8c2
JA
2602 return 0;
2603
2604 io = req->io;
2605 io->rw.iov = io->rw.fast_iov;
2606 req->io = NULL;
bcda7baa 2607 ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter, !force_nonblock);
3529d8c2
JA
2608 req->io = io;
2609 if (ret < 0)
2610 return ret;
2611
2612 io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
2613 return 0;
f67676d1
JA
2614}
2615
014db007 2616static int io_write(struct io_kiocb *req, bool force_nonblock)
2b188cc1
JA
2617{
2618 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 2619 struct kiocb *kiocb = &req->rw.kiocb;
2b188cc1 2620 struct iov_iter iter;
31b51510 2621 size_t iov_count;
f67676d1 2622 ssize_t ret, io_size;
2b188cc1 2623
bcda7baa 2624 ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
06b76d44
JA
2625 if (ret < 0)
2626 return ret;
2b188cc1 2627
fd6c2e4c
JA
2628 /* Ensure we clear previously set non-block flag */
2629 if (!force_nonblock)
9adbd45d 2630 req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
fd6c2e4c 2631
797f3f53 2632 req->result = 0;
f67676d1 2633 io_size = ret;
9e645e11 2634 if (req->flags & REQ_F_LINK)
f67676d1 2635 req->result = io_size;
9e645e11 2636
f67676d1
JA
2637 /*
2638 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2639 * we know to async punt it even if it was opened O_NONBLOCK
2640 */
29de5f6a 2641 if (force_nonblock && !io_file_supports_async(req->file))
f67676d1 2642 goto copy_iov;
31b51510 2643
10d59345
JA
2644 /* file path doesn't support NOWAIT for non-direct IO */
2645 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
2646 (req->flags & REQ_F_ISREG))
f67676d1 2647 goto copy_iov;
31b51510 2648
f67676d1 2649 iov_count = iov_iter_count(&iter);
9adbd45d 2650 ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
2b188cc1 2651 if (!ret) {
9bf7933f
RP
2652 ssize_t ret2;
2653
2b188cc1
JA
2654 /*
2655 * Open-code file_start_write here to grab freeze protection,
2656 * which will be released by another thread in
2657 * io_complete_rw(). Fool lockdep by telling it the lock got
2658 * released so that it doesn't complain about the held lock when
2659 * we return to userspace.
2660 */
491381ce 2661 if (req->flags & REQ_F_ISREG) {
9adbd45d 2662 __sb_start_write(file_inode(req->file)->i_sb,
2b188cc1 2663 SB_FREEZE_WRITE, true);
9adbd45d 2664 __sb_writers_release(file_inode(req->file)->i_sb,
2b188cc1
JA
2665 SB_FREEZE_WRITE);
2666 }
2667 kiocb->ki_flags |= IOCB_WRITE;
9bf7933f 2668
4ed734b0
JA
2669 if (!force_nonblock)
2670 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
2671
9adbd45d
JA
2672 if (req->file->f_op->write_iter)
2673 ret2 = call_write_iter(req->file, kiocb, &iter);
32960613 2674 else
9adbd45d 2675 ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
4ed734b0
JA
2676
2677 if (!force_nonblock)
2678 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
2679
faac996c 2680 /*
bff6035d 2681 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
faac996c
JA
2682 * retry them without IOCB_NOWAIT.
2683 */
2684 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
2685 ret2 = -EAGAIN;
f67676d1 2686 if (!force_nonblock || ret2 != -EAGAIN) {
014db007 2687 kiocb_done(kiocb, ret2);
f67676d1
JA
2688 } else {
2689copy_iov:
b7bb4f7d 2690 ret = io_setup_async_rw(req, io_size, iovec,
f67676d1
JA
2691 inline_vecs, &iter);
2692 if (ret)
2693 goto out_free;
29de5f6a
JA
2694 /* any defer here is final, must do a blocking retry */
2695 req->flags |= REQ_F_MUST_PUNT;
f67676d1
JA
2696 return -EAGAIN;
2697 }
2b188cc1 2698 }
31b51510 2699out_free:
99bc4c38 2700 req->flags &= ~REQ_F_NEED_CLEANUP;
1e95081c 2701 kfree(iovec);
2b188cc1
JA
2702 return ret;
2703}
2704
7d67af2c
PB
2705static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2706{
2707 struct io_splice *sp = &req->splice;
2708 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
2709 int ret;
2710
2711 if (req->flags & REQ_F_NEED_CLEANUP)
2712 return 0;
2713
2714 sp->file_in = NULL;
2715 sp->off_in = READ_ONCE(sqe->splice_off_in);
2716 sp->off_out = READ_ONCE(sqe->off);
2717 sp->len = READ_ONCE(sqe->len);
2718 sp->flags = READ_ONCE(sqe->splice_flags);
2719
2720 if (unlikely(sp->flags & ~valid_flags))
2721 return -EINVAL;
2722
2723 ret = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), &sp->file_in,
2724 (sp->flags & SPLICE_F_FD_IN_FIXED));
2725 if (ret)
2726 return ret;
2727 req->flags |= REQ_F_NEED_CLEANUP;
2728
2729 if (!S_ISREG(file_inode(sp->file_in)->i_mode))
2730 req->work.flags |= IO_WQ_WORK_UNBOUND;
2731
2732 return 0;
2733}
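/*
 * Editorial note on the SQE fields consumed above: splice_fd_in and
 * splice_off_in describe the input side (with SPLICE_F_FD_IN_FIXED selecting
 * a registered file), the regular fd and off fields describe the output side,
 * len is the byte count and splice_flags carries the SPLICE_F_* flags.
 */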
2734
2735static bool io_splice_punt(struct file *file)
2736{
2737 if (get_pipe_info(file))
2738 return false;
2739 if (!io_file_supports_async(file))
2740 return true;
2741 return !(file->f_flags & O_NONBLOCK);
2742}
2743
014db007 2744static int io_splice(struct io_kiocb *req, bool force_nonblock)
7d67af2c
PB
2745{
2746 struct io_splice *sp = &req->splice;
2747 struct file *in = sp->file_in;
2748 struct file *out = sp->file_out;
2749 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
2750 loff_t *poff_in, *poff_out;
2751 long ret;
2752
2753 if (force_nonblock) {
2754 if (io_splice_punt(in) || io_splice_punt(out))
2755 return -EAGAIN;
2756 flags |= SPLICE_F_NONBLOCK;
2757 }
2758
2759 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
2760 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
2761 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
2762 if (force_nonblock && ret == -EAGAIN)
2763 return -EAGAIN;
2764
2765 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
2766 req->flags &= ~REQ_F_NEED_CLEANUP;
2767
2768 io_cqring_add_event(req, ret);
2769 if (ret != sp->len)
2770 req_set_fail_links(req);
014db007 2771 io_put_req(req);
7d67af2c
PB
2772 return 0;
2773}
2774
2b188cc1
JA
2775/*
2776 * IORING_OP_NOP just posts a completion event, nothing else.
2777 */
78e19bbe 2778static int io_nop(struct io_kiocb *req)
2b188cc1
JA
2779{
2780 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 2781
def596e9
JA
2782 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2783 return -EINVAL;
2784
78e19bbe 2785 io_cqring_add_event(req, 0);
e65ef56d 2786 io_put_req(req);
2b188cc1
JA
2787 return 0;
2788}
2789
3529d8c2 2790static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
c992fe29 2791{
6b06314c 2792 struct io_ring_ctx *ctx = req->ctx;
c992fe29 2793
09bb8394
JA
2794 if (!req->file)
2795 return -EBADF;
c992fe29 2796
6b06314c 2797 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
def596e9 2798 return -EINVAL;
edafccee 2799 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
c992fe29
CH
2800 return -EINVAL;
2801
8ed8d3c3
JA
2802 req->sync.flags = READ_ONCE(sqe->fsync_flags);
2803 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
2804 return -EINVAL;
2805
2806 req->sync.off = READ_ONCE(sqe->off);
2807 req->sync.len = READ_ONCE(sqe->len);
c992fe29
CH
2808 return 0;
2809}
2810
8ed8d3c3
JA
2811static bool io_req_cancelled(struct io_kiocb *req)
2812{
2813 if (req->work.flags & IO_WQ_WORK_CANCEL) {
2814 req_set_fail_links(req);
2815 io_cqring_add_event(req, -ECANCELED);
e9fd9396 2816 io_put_req(req);
8ed8d3c3
JA
2817 return true;
2818 }
2819
2820 return false;
2821}
2822
014db007 2823static void __io_fsync(struct io_kiocb *req)
8ed8d3c3 2824{
8ed8d3c3 2825 loff_t end = req->sync.off + req->sync.len;
8ed8d3c3
JA
2826 int ret;
2827
9adbd45d 2828 ret = vfs_fsync_range(req->file, req->sync.off,
8ed8d3c3
JA
2829 end > 0 ? end : LLONG_MAX,
2830 req->sync.flags & IORING_FSYNC_DATASYNC);
2831 if (ret < 0)
2832 req_set_fail_links(req);
2833 io_cqring_add_event(req, ret);
014db007 2834 io_put_req(req);
5ea62161
PB
2835}
2836
2837static void io_fsync_finish(struct io_wq_work **workptr)
2838{
2839 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
5ea62161
PB
2840
2841 if (io_req_cancelled(req))
2842 return;
014db007 2843 __io_fsync(req);
e9fd9396 2844 io_steal_work(req, workptr);
8ed8d3c3
JA
2845}
2846
014db007 2847static int io_fsync(struct io_kiocb *req, bool force_nonblock)
c992fe29 2848{
c992fe29 2849 /* fsync always requires a blocking context */
8ed8d3c3 2850 if (force_nonblock) {
8ed8d3c3 2851 req->work.func = io_fsync_finish;
c992fe29 2852 return -EAGAIN;
8ed8d3c3 2853 }
014db007 2854 __io_fsync(req);
c992fe29
CH
2855 return 0;
2856}
2857
014db007 2858static void __io_fallocate(struct io_kiocb *req)
8ed8d3c3 2859{
8ed8d3c3
JA
2860 int ret;
2861
4ed734b0 2862 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
d63d1b5e
JA
2863 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
2864 req->sync.len);
4ed734b0 2865 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
8ed8d3c3
JA
2866 if (ret < 0)
2867 req_set_fail_links(req);
2868 io_cqring_add_event(req, ret);
014db007 2869 io_put_req(req);
5ea62161
PB
2870}
2871
2872static void io_fallocate_finish(struct io_wq_work **workptr)
2873{
2874 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
5ea62161 2875
594506fe
PB
2876 if (io_req_cancelled(req))
2877 return;
014db007 2878 __io_fallocate(req);
e9fd9396 2879 io_steal_work(req, workptr);
5d17b4a4
JA
2880}
2881
d63d1b5e
JA
2882static int io_fallocate_prep(struct io_kiocb *req,
2883 const struct io_uring_sqe *sqe)
2884{
2885 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
2886 return -EINVAL;
2887
2888 req->sync.off = READ_ONCE(sqe->off);
2889 req->sync.len = READ_ONCE(sqe->addr);
2890 req->sync.mode = READ_ONCE(sqe->len);
4ed734b0 2891 req->fsize = rlimit(RLIMIT_FSIZE);
d63d1b5e
JA
2892 return 0;
2893}
2894
014db007 2895static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
5d17b4a4 2896{
d63d1b5e 2897 /* fallocate always requiring blocking context */
8ed8d3c3 2898 if (force_nonblock) {
d63d1b5e 2899 req->work.func = io_fallocate_finish;
5d17b4a4 2900 return -EAGAIN;
8ed8d3c3 2901 }
5d17b4a4 2902
014db007 2903 __io_fallocate(req);
5d17b4a4
JA
2904 return 0;
2905}
2906
15b71abe 2907static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
b7bb4f7d 2908{
f8748881 2909 const char __user *fname;
15b71abe 2910 int ret;
b7bb4f7d 2911
15b71abe
JA
2912 if (sqe->ioprio || sqe->buf_index)
2913 return -EINVAL;
cf3040ca
JA
2914 if (sqe->flags & IOSQE_FIXED_FILE)
2915 return -EBADF;
0bdbdd08
PB
2916 if (req->flags & REQ_F_NEED_CLEANUP)
2917 return 0;
03b1230c 2918
15b71abe 2919 req->open.dfd = READ_ONCE(sqe->fd);
c12cedf2 2920 req->open.how.mode = READ_ONCE(sqe->len);
f8748881 2921 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
c12cedf2 2922 req->open.how.flags = READ_ONCE(sqe->open_flags);
3529d8c2 2923
f8748881 2924 req->open.filename = getname(fname);
15b71abe
JA
2925 if (IS_ERR(req->open.filename)) {
2926 ret = PTR_ERR(req->open.filename);
2927 req->open.filename = NULL;
2928 return ret;
2929 }
3529d8c2 2930
8fef80bf 2931 req->flags |= REQ_F_NEED_CLEANUP;
15b71abe 2932 return 0;
03b1230c
JA
2933}
2934
cebdb986 2935static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
aa1fa28f 2936{
cebdb986
JA
2937 struct open_how __user *how;
2938 const char __user *fname;
2939 size_t len;
0fa03c62
JA
2940 int ret;
2941
cebdb986 2942 if (sqe->ioprio || sqe->buf_index)
0fa03c62 2943 return -EINVAL;
cf3040ca
JA
2944 if (sqe->flags & IOSQE_FIXED_FILE)
2945 return -EBADF;
0bdbdd08
PB
2946 if (req->flags & REQ_F_NEED_CLEANUP)
2947 return 0;
0fa03c62 2948
cebdb986
JA
2949 req->open.dfd = READ_ONCE(sqe->fd);
2950 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
2951 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
2952 len = READ_ONCE(sqe->len);
0fa03c62 2953
cebdb986
JA
2954 if (len < OPEN_HOW_SIZE_VER0)
2955 return -EINVAL;
3529d8c2 2956
cebdb986
JA
2957 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
2958 len);
2959 if (ret)
2960 return ret;
3529d8c2 2961
cebdb986
JA
2962 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
2963 req->open.how.flags |= O_LARGEFILE;
0fa03c62 2964
cebdb986
JA
2965 req->open.filename = getname(fname);
2966 if (IS_ERR(req->open.filename)) {
2967 ret = PTR_ERR(req->open.filename);
2968 req->open.filename = NULL;
2969 return ret;
2970 }
2971
8fef80bf 2972 req->flags |= REQ_F_NEED_CLEANUP;
cebdb986
JA
2973 return 0;
2974}
2975
014db007 2976static int io_openat2(struct io_kiocb *req, bool force_nonblock)
15b71abe
JA
2977{
2978 struct open_flags op;
15b71abe
JA
2979 struct file *file;
2980 int ret;
2981
f86cd20c 2982 if (force_nonblock)
15b71abe 2983 return -EAGAIN;
15b71abe 2984
cebdb986 2985 ret = build_open_flags(&req->open.how, &op);
15b71abe
JA
2986 if (ret)
2987 goto err;
2988
cebdb986 2989 ret = get_unused_fd_flags(req->open.how.flags);
15b71abe
JA
2990 if (ret < 0)
2991 goto err;
2992
2993 file = do_filp_open(req->open.dfd, req->open.filename, &op);
2994 if (IS_ERR(file)) {
2995 put_unused_fd(ret);
2996 ret = PTR_ERR(file);
2997 } else {
2998 fsnotify_open(file);
2999 fd_install(ret, file);
3000 }
3001err:
3002 putname(req->open.filename);
8fef80bf 3003 req->flags &= ~REQ_F_NEED_CLEANUP;
15b71abe
JA
3004 if (ret < 0)
3005 req_set_fail_links(req);
3006 io_cqring_add_event(req, ret);
014db007 3007 io_put_req(req);
15b71abe
JA
3008 return 0;
3009}
3010
014db007 3011static int io_openat(struct io_kiocb *req, bool force_nonblock)
cebdb986
JA
3012{
3013 req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
014db007 3014 return io_openat2(req, force_nonblock);
cebdb986
JA
3015}
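/*
 * Editorial note: IORING_OP_OPENAT is just the openat2 path with 'how' built
 * from the classic arguments; per io_openat_prep(), sqe->fd is the dirfd,
 * sqe->addr the pathname, sqe->len the mode and sqe->open_flags the open(2)
 * flags.
 */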
3016
067524e9
JA
3017static int io_remove_buffers_prep(struct io_kiocb *req,
3018 const struct io_uring_sqe *sqe)
3019{
3020 struct io_provide_buf *p = &req->pbuf;
3021 u64 tmp;
3022
3023 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3024 return -EINVAL;
3025
3026 tmp = READ_ONCE(sqe->fd);
3027 if (!tmp || tmp > USHRT_MAX)
3028 return -EINVAL;
3029
3030 memset(p, 0, sizeof(*p));
3031 p->nbufs = tmp;
3032 p->bgid = READ_ONCE(sqe->buf_group);
3033 return 0;
3034}
3035
3036static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3037 int bgid, unsigned nbufs)
3038{
3039 unsigned i = 0;
3040
3041 /* shouldn't happen */
3042 if (!nbufs)
3043 return 0;
3044
3045 /* the head kbuf is the list itself */
3046 while (!list_empty(&buf->list)) {
3047 struct io_buffer *nxt;
3048
3049 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3050 list_del(&nxt->list);
3051 kfree(nxt);
3052 if (++i == nbufs)
3053 return i;
3054 }
3055 i++;
3056 kfree(buf);
3057 idr_remove(&ctx->io_buffer_idr, bgid);
3058
3059 return i;
3060}
3061
3062static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock)
3063{
3064 struct io_provide_buf *p = &req->pbuf;
3065 struct io_ring_ctx *ctx = req->ctx;
3066 struct io_buffer *head;
3067 int ret = 0;
3068
3069 io_ring_submit_lock(ctx, !force_nonblock);
3070
3071 lockdep_assert_held(&ctx->uring_lock);
3072
3073 ret = -ENOENT;
3074 head = idr_find(&ctx->io_buffer_idr, p->bgid);
3075 if (head)
3076 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
3077
3078 io_ring_submit_unlock(ctx, !force_nonblock);
3079 if (ret < 0)
3080 req_set_fail_links(req);
3081 io_cqring_add_event(req, ret);
3082 io_put_req(req);
3083 return 0;
3084}
3085
ddf0322d
JA
3086static int io_provide_buffers_prep(struct io_kiocb *req,
3087 const struct io_uring_sqe *sqe)
3088{
3089 struct io_provide_buf *p = &req->pbuf;
3090 u64 tmp;
3091
3092 if (sqe->ioprio || sqe->rw_flags)
3093 return -EINVAL;
3094
3095 tmp = READ_ONCE(sqe->fd);
3096 if (!tmp || tmp > USHRT_MAX)
3097 return -E2BIG;
3098 p->nbufs = tmp;
3099 p->addr = READ_ONCE(sqe->addr);
3100 p->len = READ_ONCE(sqe->len);
3101
3102 if (!access_ok(u64_to_user_ptr(p->addr), p->len))
3103 return -EFAULT;
3104
3105 p->bgid = READ_ONCE(sqe->buf_group);
3106 tmp = READ_ONCE(sqe->off);
3107 if (tmp > USHRT_MAX)
3108 return -E2BIG;
3109 p->bid = tmp;
3110 return 0;
3111}
3112
3113static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3114{
3115 struct io_buffer *buf;
3116 u64 addr = pbuf->addr;
3117 int i, bid = pbuf->bid;
3118
3119 for (i = 0; i < pbuf->nbufs; i++) {
3120 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3121 if (!buf)
3122 break;
3123
3124 buf->addr = addr;
3125 buf->len = pbuf->len;
3126 buf->bid = bid;
3127 addr += pbuf->len;
3128 bid++;
3129 if (!*head) {
3130 INIT_LIST_HEAD(&buf->list);
3131 *head = buf;
3132 } else {
3133 list_add_tail(&buf->list, &(*head)->list);
3134 }
3135 }
3136
3137 return i ? i : -ENOMEM;
3138}
3139
ddf0322d
JA
3140static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock)
3141{
3142 struct io_provide_buf *p = &req->pbuf;
3143 struct io_ring_ctx *ctx = req->ctx;
3144 struct io_buffer *head, *list;
3145 int ret = 0;
3146
3147 io_ring_submit_lock(ctx, !force_nonblock);
3148
3149 lockdep_assert_held(&ctx->uring_lock);
3150
3151 list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
3152
3153 ret = io_add_buffers(p, &head);
3154 if (ret < 0)
3155 goto out;
3156
3157 if (!list) {
3158 ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
3159 GFP_KERNEL);
3160 if (ret < 0) {
067524e9 3161 __io_remove_buffers(ctx, head, p->bgid, -1U);
ddf0322d
JA
3162 goto out;
3163 }
3164 }
3165out:
3166 io_ring_submit_unlock(ctx, !force_nonblock);
3167 if (ret < 0)
3168 req_set_fail_links(req);
3169 io_cqring_add_event(req, ret);
3170 io_put_req(req);
3171 return 0;
3172}
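/*
 * Editor's sketch (not kernel code): how an application-side SQE could be
 * filled in to match io_provide_buffers_prep() above, providing four 4KB
 * buffers in group 7 starting at buffer ID 0. 'base' is a hypothetical user
 * allocation; field names follow struct io_uring_sqe.
 */
#if 0
	sqe->opcode	= IORING_OP_PROVIDE_BUFFERS;
	sqe->fd		= 4;				/* nbufs */
	sqe->addr	= (unsigned long) base;		/* start of the buffer area */
	sqe->len	= 4096;				/* length of each buffer */
	sqe->buf_group	= 7;				/* bgid */
	sqe->off	= 0;				/* first bid */
#endif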
3173
3e4827b0
JA
3174static int io_epoll_ctl_prep(struct io_kiocb *req,
3175 const struct io_uring_sqe *sqe)
3176{
3177#if defined(CONFIG_EPOLL)
3178 if (sqe->ioprio || sqe->buf_index)
3179 return -EINVAL;
3180
3181 req->epoll.epfd = READ_ONCE(sqe->fd);
3182 req->epoll.op = READ_ONCE(sqe->len);
3183 req->epoll.fd = READ_ONCE(sqe->off);
3184
3185 if (ep_op_has_event(req->epoll.op)) {
3186 struct epoll_event __user *ev;
3187
3188 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
3189 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
3190 return -EFAULT;
3191 }
3192
3193 return 0;
3194#else
3195 return -EOPNOTSUPP;
3196#endif
3197}
3198
014db007 3199static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock)
3e4827b0
JA
3200{
3201#if defined(CONFIG_EPOLL)
3202 struct io_epoll *ie = &req->epoll;
3203 int ret;
3204
3205 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
3206 if (force_nonblock && ret == -EAGAIN)
3207 return -EAGAIN;
3208
3209 if (ret < 0)
3210 req_set_fail_links(req);
3211 io_cqring_add_event(req, ret);
014db007 3212 io_put_req(req);
3e4827b0
JA
3213 return 0;
3214#else
3215 return -EOPNOTSUPP;
3216#endif
3217}
3218
c1ca757b
JA
3219static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3220{
3221#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3222 if (sqe->ioprio || sqe->buf_index || sqe->off)
3223 return -EINVAL;
3224
3225 req->madvise.addr = READ_ONCE(sqe->addr);
3226 req->madvise.len = READ_ONCE(sqe->len);
3227 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
3228 return 0;
3229#else
3230 return -EOPNOTSUPP;
3231#endif
3232}
3233
014db007 3234static int io_madvise(struct io_kiocb *req, bool force_nonblock)
c1ca757b
JA
3235{
3236#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3237 struct io_madvise *ma = &req->madvise;
3238 int ret;
3239
3240 if (force_nonblock)
3241 return -EAGAIN;
3242
3243 ret = do_madvise(ma->addr, ma->len, ma->advice);
3244 if (ret < 0)
3245 req_set_fail_links(req);
3246 io_cqring_add_event(req, ret);
014db007 3247 io_put_req(req);
c1ca757b
JA
3248 return 0;
3249#else
3250 return -EOPNOTSUPP;
3251#endif
3252}
3253
4840e418
JA
3254static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3255{
3256 if (sqe->ioprio || sqe->buf_index || sqe->addr)
3257 return -EINVAL;
3258
3259 req->fadvise.offset = READ_ONCE(sqe->off);
3260 req->fadvise.len = READ_ONCE(sqe->len);
3261 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
3262 return 0;
3263}
3264
014db007 3265static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
4840e418
JA
3266{
3267 struct io_fadvise *fa = &req->fadvise;
3268 int ret;
3269
3e69426d
JA
3270 if (force_nonblock) {
3271 switch (fa->advice) {
3272 case POSIX_FADV_NORMAL:
3273 case POSIX_FADV_RANDOM:
3274 case POSIX_FADV_SEQUENTIAL:
3275 break;
3276 default:
3277 return -EAGAIN;
3278 }
3279 }
4840e418
JA
3280
3281 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
3282 if (ret < 0)
3283 req_set_fail_links(req);
3284 io_cqring_add_event(req, ret);
014db007 3285 io_put_req(req);
4840e418
JA
3286 return 0;
3287}
3288
eddc7ef5
JA
3289static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3290{
f8748881 3291 const char __user *fname;
eddc7ef5
JA
3292 unsigned lookup_flags;
3293 int ret;
3294
3295 if (sqe->ioprio || sqe->buf_index)
3296 return -EINVAL;
cf3040ca
JA
3297 if (sqe->flags & IOSQE_FIXED_FILE)
3298 return -EBADF;
0bdbdd08
PB
3299 if (req->flags & REQ_F_NEED_CLEANUP)
3300 return 0;
eddc7ef5
JA
3301
3302 req->open.dfd = READ_ONCE(sqe->fd);
3303 req->open.mask = READ_ONCE(sqe->len);
f8748881 3304 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
eddc7ef5 3305 req->open.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
c12cedf2 3306 req->open.how.flags = READ_ONCE(sqe->statx_flags);
eddc7ef5 3307
c12cedf2 3308 if (vfs_stat_set_lookup_flags(&lookup_flags, req->open.how.flags))
eddc7ef5
JA
3309 return -EINVAL;
3310
f8748881 3311 req->open.filename = getname_flags(fname, lookup_flags, NULL);
eddc7ef5
JA
3312 if (IS_ERR(req->open.filename)) {
3313 ret = PTR_ERR(req->open.filename);
3314 req->open.filename = NULL;
3315 return ret;
3316 }
3317
8fef80bf 3318 req->flags |= REQ_F_NEED_CLEANUP;
eddc7ef5
JA
3319 return 0;
3320}
3321
014db007 3322static int io_statx(struct io_kiocb *req, bool force_nonblock)
eddc7ef5
JA
3323{
3324 struct io_open *ctx = &req->open;
3325 unsigned lookup_flags;
3326 struct path path;
3327 struct kstat stat;
3328 int ret;
3329
3330 if (force_nonblock)
3331 return -EAGAIN;
3332
c12cedf2 3333 if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
eddc7ef5
JA
3334 return -EINVAL;
3335
3336retry:
3337 /* filename_lookup() drops it, keep a reference */
3338 ctx->filename->refcnt++;
3339
3340 ret = filename_lookup(ctx->dfd, ctx->filename, lookup_flags, &path,
3341 NULL);
3342 if (ret)
3343 goto err;
3344
c12cedf2 3345 ret = vfs_getattr(&path, &stat, ctx->mask, ctx->how.flags);
eddc7ef5
JA
3346 path_put(&path);
3347 if (retry_estale(ret, lookup_flags)) {
3348 lookup_flags |= LOOKUP_REVAL;
3349 goto retry;
3350 }
3351 if (!ret)
3352 ret = cp_statx(&stat, ctx->buffer);
3353err:
3354 putname(ctx->filename);
8fef80bf 3355 req->flags &= ~REQ_F_NEED_CLEANUP;
eddc7ef5
JA
3356 if (ret < 0)
3357 req_set_fail_links(req);
3358 io_cqring_add_event(req, ret);
014db007 3359 io_put_req(req);
eddc7ef5
JA
3360 return 0;
3361}
3362
b5dba59e
JA
3363static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3364{
3365 /*
3366 * If we queue this for async, it must not be cancellable. That would
3367 * leave the 'file' in an indeterminate state.
3368 */
3369 req->work.flags |= IO_WQ_WORK_NO_CANCEL;
3370
3371 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
3372 sqe->rw_flags || sqe->buf_index)
3373 return -EINVAL;
3374 if (sqe->flags & IOSQE_FIXED_FILE)
cf3040ca 3375 return -EBADF;
b5dba59e
JA
3376
3377 req->close.fd = READ_ONCE(sqe->fd);
3378 if (req->file->f_op == &io_uring_fops ||
b14cca0c 3379 req->close.fd == req->ctx->ring_fd)
b5dba59e
JA
3380 return -EBADF;
3381
3382 return 0;
3383}
3384
a93b3331 3385/* only called when __close_fd_get_file() is done */
014db007 3386static void __io_close_finish(struct io_kiocb *req)
a93b3331
PB
3387{
3388 int ret;
3389
3390 ret = filp_close(req->close.put_file, req->work.files);
3391 if (ret < 0)
3392 req_set_fail_links(req);
3393 io_cqring_add_event(req, ret);
3394 fput(req->close.put_file);
014db007 3395 io_put_req(req);
a93b3331
PB
3396}
3397
b5dba59e
JA
3398static void io_close_finish(struct io_wq_work **workptr)
3399{
3400 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
b5dba59e 3401
7fbeb95d 3402 /* not cancellable, don't do io_req_cancelled() */
014db007 3403 __io_close_finish(req);
e9fd9396 3404 io_steal_work(req, workptr);
b5dba59e
JA
3405}
3406
014db007 3407static int io_close(struct io_kiocb *req, bool force_nonblock)
b5dba59e
JA
3408{
3409 int ret;
3410
3411 req->close.put_file = NULL;
3412 ret = __close_fd_get_file(req->close.fd, &req->close.put_file);
3413 if (ret < 0)
3414 return ret;
3415
3416 /* if the file has a flush method, be safe and punt to async */
a2100672 3417 if (req->close.put_file->f_op->flush && force_nonblock) {
594506fe
PB
3418 /* submission ref will be dropped, take it for async */
3419 refcount_inc(&req->refs);
3420
a2100672
PB
3421 req->work.func = io_close_finish;
3422 /*
3423 * Do manual async queue here to avoid grabbing files - we don't
3424 * need the files, and it'll cause io_close_finish() to close
3425 * the file again and cause a double CQE entry for this request
3426 */
3427 io_queue_async_work(req);
3428 return 0;
3429 }
b5dba59e
JA
3430
3431 /*
3432 * No ->flush(), safely close from here and just punt the
3433 * fput() to async context.
3434 */
014db007 3435 __io_close_finish(req);
a93b3331 3436 return 0;
b5dba59e
JA
3437}
3438
3529d8c2 3439static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5d17b4a4
JA
3440{
3441 struct io_ring_ctx *ctx = req->ctx;
5d17b4a4
JA
3442
3443 if (!req->file)
3444 return -EBADF;
5d17b4a4
JA
3445
3446 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3447 return -EINVAL;
3448 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
3449 return -EINVAL;
3450
8ed8d3c3
JA
3451 req->sync.off = READ_ONCE(sqe->off);
3452 req->sync.len = READ_ONCE(sqe->len);
3453 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
8ed8d3c3
JA
3454 return 0;
3455}
3456
014db007 3457static void __io_sync_file_range(struct io_kiocb *req)
8ed8d3c3 3458{
8ed8d3c3
JA
3459 int ret;
3460
9adbd45d 3461 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
8ed8d3c3
JA
3462 req->sync.flags);
3463 if (ret < 0)
3464 req_set_fail_links(req);
3465 io_cqring_add_event(req, ret);
014db007 3466 io_put_req(req);
5ea62161
PB
3467}
3468
3469
3470static void io_sync_file_range_finish(struct io_wq_work **workptr)
3471{
3472 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
3473 struct io_kiocb *nxt = NULL;
3474
3475 if (io_req_cancelled(req))
3476 return;
014db007 3477 __io_sync_file_range(req);
594506fe 3478 io_put_req(req); /* put submission ref */
8ed8d3c3 3479 if (nxt)
78912934 3480 io_wq_assign_next(workptr, nxt);
5d17b4a4
JA
3481}
3482
014db007 3483static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
5d17b4a4 3484{
5d17b4a4 3485 /* sync_file_range always requires a blocking context */
8ed8d3c3 3486 if (force_nonblock) {
8ed8d3c3 3487 req->work.func = io_sync_file_range_finish;
5d17b4a4 3488 return -EAGAIN;
8ed8d3c3 3489 }
5d17b4a4 3490
014db007 3491 __io_sync_file_range(req);
5d17b4a4
JA
3492 return 0;
3493}
3494
469956e8 3495#if defined(CONFIG_NET)
02d27d89
PB
3496static int io_setup_async_msg(struct io_kiocb *req,
3497 struct io_async_msghdr *kmsg)
3498{
3499 if (req->io)
3500 return -EAGAIN;
3501 if (io_alloc_async_ctx(req)) {
3502 if (kmsg->iov != kmsg->fast_iov)
3503 kfree(kmsg->iov);
3504 return -ENOMEM;
3505 }
3506 req->flags |= REQ_F_NEED_CLEANUP;
3507 memcpy(&req->io->msg, kmsg, sizeof(*kmsg));
3508 return -EAGAIN;
3509}
3510
3529d8c2 3511static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
03b1230c 3512{
e47293fd 3513 struct io_sr_msg *sr = &req->sr_msg;
3529d8c2 3514 struct io_async_ctx *io = req->io;
99bc4c38 3515 int ret;
03b1230c 3516
e47293fd
JA
3517 sr->msg_flags = READ_ONCE(sqe->msg_flags);
3518 sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
fddaface 3519 sr->len = READ_ONCE(sqe->len);
3529d8c2 3520
d8768362
JA
3521#ifdef CONFIG_COMPAT
3522 if (req->ctx->compat)
3523 sr->msg_flags |= MSG_CMSG_COMPAT;
3524#endif
3525
fddaface 3526 if (!io || req->opcode == IORING_OP_SEND)
3529d8c2 3527 return 0;
5f798bea
PB
3528 /* iovec is already imported */
3529 if (req->flags & REQ_F_NEED_CLEANUP)
3530 return 0;
3529d8c2 3531
d9688565 3532 io->msg.iov = io->msg.fast_iov;
99bc4c38 3533 ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
e47293fd 3534 &io->msg.iov);
99bc4c38
PB
3535 if (!ret)
3536 req->flags |= REQ_F_NEED_CLEANUP;
3537 return ret;
03b1230c
JA
3538}
3539
014db007 3540static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
aa1fa28f 3541{
0b416c3e 3542 struct io_async_msghdr *kmsg = NULL;
0fa03c62
JA
3543 struct socket *sock;
3544 int ret;
3545
3546 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3547 return -EINVAL;
3548
3549 sock = sock_from_file(req->file, &ret);
3550 if (sock) {
b7bb4f7d 3551 struct io_async_ctx io;
0fa03c62
JA
3552 unsigned flags;
3553
03b1230c 3554 if (req->io) {
0b416c3e 3555 kmsg = &req->io->msg;
b537916c 3556 kmsg->msg.msg_name = &req->io->msg.addr;
0b416c3e
JA
3557 /* if iov is set, it's allocated already */
3558 if (!kmsg->iov)
3559 kmsg->iov = kmsg->fast_iov;
3560 kmsg->msg.msg_iter.iov = kmsg->iov;
03b1230c 3561 } else {
3529d8c2
JA
3562 struct io_sr_msg *sr = &req->sr_msg;
3563
0b416c3e 3564 kmsg = &io.msg;
b537916c 3565 kmsg->msg.msg_name = &io.msg.addr;
3529d8c2
JA
3566
3567 io.msg.iov = io.msg.fast_iov;
3568 ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
3569 sr->msg_flags, &io.msg.iov);
03b1230c 3570 if (ret)
3529d8c2 3571 return ret;
03b1230c 3572 }
0fa03c62 3573
e47293fd
JA
3574 flags = req->sr_msg.msg_flags;
3575 if (flags & MSG_DONTWAIT)
3576 req->flags |= REQ_F_NOWAIT;
3577 else if (force_nonblock)
3578 flags |= MSG_DONTWAIT;
3579
0b416c3e 3580 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
02d27d89
PB
3581 if (force_nonblock && ret == -EAGAIN)
3582 return io_setup_async_msg(req, kmsg);
441cdbd5
JA
3583 if (ret == -ERESTARTSYS)
3584 ret = -EINTR;
0fa03c62
JA
3585 }
3586
1e95081c 3587 if (kmsg && kmsg->iov != kmsg->fast_iov)
0b416c3e 3588 kfree(kmsg->iov);
99bc4c38 3589 req->flags &= ~REQ_F_NEED_CLEANUP;
78e19bbe 3590 io_cqring_add_event(req, ret);
4e88d6e7
JA
3591 if (ret < 0)
3592 req_set_fail_links(req);
014db007 3593 io_put_req(req);
5d17b4a4 3594 return 0;
03b1230c 3595}
aa1fa28f 3596
014db007 3597static int io_send(struct io_kiocb *req, bool force_nonblock)
fddaface 3598{
fddaface
JA
3599 struct socket *sock;
3600 int ret;
3601
3602 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3603 return -EINVAL;
3604
3605 sock = sock_from_file(req->file, &ret);
3606 if (sock) {
3607 struct io_sr_msg *sr = &req->sr_msg;
3608 struct msghdr msg;
3609 struct iovec iov;
3610 unsigned flags;
3611
3612 ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
3613 &msg.msg_iter);
3614 if (ret)
3615 return ret;
3616
3617 msg.msg_name = NULL;
3618 msg.msg_control = NULL;
3619 msg.msg_controllen = 0;
3620 msg.msg_namelen = 0;
3621
3622 flags = req->sr_msg.msg_flags;
3623 if (flags & MSG_DONTWAIT)
3624 req->flags |= REQ_F_NOWAIT;
3625 else if (force_nonblock)
3626 flags |= MSG_DONTWAIT;
3627
0b7b21e4
JA
3628 msg.msg_flags = flags;
3629 ret = sock_sendmsg(sock, &msg);
fddaface
JA
3630 if (force_nonblock && ret == -EAGAIN)
3631 return -EAGAIN;
3632 if (ret == -ERESTARTSYS)
3633 ret = -EINTR;
3634 }
3635
3636 io_cqring_add_event(req, ret);
3637 if (ret < 0)
3638 req_set_fail_links(req);
014db007 3639 io_put_req(req);
fddaface 3640 return 0;
fddaface
JA
3641}
3642
52de1fe1
JA
3643static int __io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
3644{
3645 struct io_sr_msg *sr = &req->sr_msg;
3646 struct iovec __user *uiov;
3647 size_t iov_len;
3648 int ret;
3649
3650 ret = __copy_msghdr_from_user(&io->msg.msg, sr->msg, &io->msg.uaddr,
3651 &uiov, &iov_len);
3652 if (ret)
3653 return ret;
3654
3655 if (req->flags & REQ_F_BUFFER_SELECT) {
3656 if (iov_len > 1)
3657 return -EINVAL;
3658 if (copy_from_user(io->msg.iov, uiov, sizeof(*uiov)))
3659 return -EFAULT;
3660 sr->len = io->msg.iov[0].iov_len;
3661 iov_iter_init(&io->msg.msg.msg_iter, READ, io->msg.iov, 1,
3662 sr->len);
3663 io->msg.iov = NULL;
3664 } else {
3665 ret = import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
3666 &io->msg.iov, &io->msg.msg.msg_iter);
3667 if (ret > 0)
3668 ret = 0;
3669 }
3670
3671 return ret;
3672}
3673
3674#ifdef CONFIG_COMPAT
3675static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
3676 struct io_async_ctx *io)
3677{
3678 struct compat_msghdr __user *msg_compat;
3679 struct io_sr_msg *sr = &req->sr_msg;
3680 struct compat_iovec __user *uiov;
3681 compat_uptr_t ptr;
3682 compat_size_t len;
3683 int ret;
3684
3685 msg_compat = (struct compat_msghdr __user *) sr->msg;
3686 ret = __get_compat_msghdr(&io->msg.msg, msg_compat, &io->msg.uaddr,
3687 &ptr, &len);
3688 if (ret)
3689 return ret;
3690
3691 uiov = compat_ptr(ptr);
3692 if (req->flags & REQ_F_BUFFER_SELECT) {
3693 compat_ssize_t clen;
3694
3695 if (len > 1)
3696 return -EINVAL;
3697 if (!access_ok(uiov, sizeof(*uiov)))
3698 return -EFAULT;
3699 if (__get_user(clen, &uiov->iov_len))
3700 return -EFAULT;
3701 if (clen < 0)
3702 return -EINVAL;
3703 sr->len = io->msg.iov[0].iov_len;
3704 io->msg.iov = NULL;
3705 } else {
3706 ret = compat_import_iovec(READ, uiov, len, UIO_FASTIOV,
3707 &io->msg.iov,
3708 &io->msg.msg.msg_iter);
3709 if (ret < 0)
3710 return ret;
3711 }
3712
3713 return 0;
3714}
3715#endif
3716
3717static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
3718{
3719 io->msg.iov = io->msg.fast_iov;
3720
3721#ifdef CONFIG_COMPAT
3722 if (req->ctx->compat)
3723 return __io_compat_recvmsg_copy_hdr(req, io);
3724#endif
3725
3726 return __io_recvmsg_copy_hdr(req, io);
3727}
3728
bcda7baa
JA
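/*
 * Provided-buffer support for receives: with IOSQE_BUFFER_SELECT the
 * kernel picks a buffer from group sr->bgid and reports which one was
 * used through cqe->flags. As a usage sketch (not part of this file),
 * userspace would check IORING_CQE_F_BUFFER and recover the id with
 * bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT.
 */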
3729static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
3730 int *cflags, bool needs_lock)
3731{
3732 struct io_sr_msg *sr = &req->sr_msg;
3733 struct io_buffer *kbuf;
3734
3735 if (!(req->flags & REQ_F_BUFFER_SELECT))
3736 return NULL;
3737
3738 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
3739 if (IS_ERR(kbuf))
3740 return kbuf;
3741
3742 sr->kbuf = kbuf;
3743 req->flags |= REQ_F_BUFFER_SELECTED;
3744
3745 *cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
3746 *cflags |= IORING_CQE_F_BUFFER;
3747 return kbuf;
3748}
3749
3529d8c2
JA
3750static int io_recvmsg_prep(struct io_kiocb *req,
3751 const struct io_uring_sqe *sqe)
aa1fa28f 3752{
e47293fd 3753 struct io_sr_msg *sr = &req->sr_msg;
3529d8c2 3754 struct io_async_ctx *io = req->io;
99bc4c38 3755 int ret;
3529d8c2
JA
3756
3757 sr->msg_flags = READ_ONCE(sqe->msg_flags);
3758 sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
0b7b21e4 3759 sr->len = READ_ONCE(sqe->len);
bcda7baa 3760 sr->bgid = READ_ONCE(sqe->buf_group);
06b76d44 3761
d8768362
JA
3762#ifdef CONFIG_COMPAT
3763 if (req->ctx->compat)
3764 sr->msg_flags |= MSG_CMSG_COMPAT;
3765#endif
3766
fddaface 3767 if (!io || req->opcode == IORING_OP_RECV)
06b76d44 3768 return 0;
5f798bea
PB
3769 /* iovec is already imported */
3770 if (req->flags & REQ_F_NEED_CLEANUP)
3771 return 0;
03b1230c 3772
52de1fe1 3773 ret = io_recvmsg_copy_hdr(req, io);
99bc4c38
PB
3774 if (!ret)
3775 req->flags |= REQ_F_NEED_CLEANUP;
3776 return ret;
aa1fa28f
JA
3777}
3778
014db007 3779static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
aa1fa28f 3780{
0b416c3e 3781 struct io_async_msghdr *kmsg = NULL;
03b1230c 3782 struct socket *sock;
52de1fe1 3783 int ret, cflags = 0;
03b1230c
JA
3784
3785 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3786 return -EINVAL;
3787
3788 sock = sock_from_file(req->file, &ret);
3789 if (sock) {
52de1fe1 3790 struct io_buffer *kbuf;
b7bb4f7d 3791 struct io_async_ctx io;
03b1230c
JA
3792 unsigned flags;
3793
03b1230c 3794 if (req->io) {
0b416c3e 3795 kmsg = &req->io->msg;
b537916c 3796 kmsg->msg.msg_name = &req->io->msg.addr;
0b416c3e
JA
3797 /* if iov is set, it's allocated already */
3798 if (!kmsg->iov)
3799 kmsg->iov = kmsg->fast_iov;
3800 kmsg->msg.msg_iter.iov = kmsg->iov;
03b1230c 3801 } else {
0b416c3e 3802 kmsg = &io.msg;
b537916c 3803 kmsg->msg.msg_name = &io.msg.addr;
3529d8c2 3804
52de1fe1 3805 ret = io_recvmsg_copy_hdr(req, &io);
03b1230c 3806 if (ret)
3529d8c2 3807 return ret;
03b1230c
JA
3808 }
3809
52de1fe1
JA
3810 kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
3811 if (IS_ERR(kbuf)) {
3812 return PTR_ERR(kbuf);
3813 } else if (kbuf) {
3814 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3815 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
3816 1, req->sr_msg.len);
3817 }
3818
e47293fd
JA
3819 flags = req->sr_msg.msg_flags;
3820 if (flags & MSG_DONTWAIT)
3821 req->flags |= REQ_F_NOWAIT;
3822 else if (force_nonblock)
3823 flags |= MSG_DONTWAIT;
3824
3825 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
3826 kmsg->uaddr, flags);
02d27d89
PB
3827 if (force_nonblock && ret == -EAGAIN)
3828 return io_setup_async_msg(req, kmsg);
03b1230c
JA
3829 if (ret == -ERESTARTSYS)
3830 ret = -EINTR;
3831 }
3832
1e95081c 3833 if (kmsg && kmsg->iov != kmsg->fast_iov)
0b416c3e 3834 kfree(kmsg->iov);
99bc4c38 3835 req->flags &= ~REQ_F_NEED_CLEANUP;
52de1fe1 3836 __io_cqring_add_event(req, ret, cflags);
4e88d6e7
JA
3837 if (ret < 0)
3838 req_set_fail_links(req);
014db007 3839 io_put_req(req);
03b1230c 3840 return 0;
0fa03c62 3841}
5d17b4a4 3842
014db007 3843static int io_recv(struct io_kiocb *req, bool force_nonblock)
fddaface 3844{
bcda7baa 3845 struct io_buffer *kbuf = NULL;
fddaface 3846 struct socket *sock;
bcda7baa 3847 int ret, cflags = 0;
fddaface
JA
3848
3849 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3850 return -EINVAL;
3851
3852 sock = sock_from_file(req->file, &ret);
3853 if (sock) {
3854 struct io_sr_msg *sr = &req->sr_msg;
bcda7baa 3855 void __user *buf = sr->buf;
fddaface
JA
3856 struct msghdr msg;
3857 struct iovec iov;
3858 unsigned flags;
3859
bcda7baa
JA
3860 kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
3861 if (IS_ERR(kbuf))
3862 return PTR_ERR(kbuf);
3863 else if (kbuf)
3864 buf = u64_to_user_ptr(kbuf->addr);
3865
3866 ret = import_single_range(READ, buf, sr->len, &iov,
fddaface 3867 &msg.msg_iter);
bcda7baa
JA
3868 if (ret) {
3869 kfree(kbuf);
fddaface 3870 return ret;
bcda7baa 3871 }
fddaface 3872
bcda7baa 3873 req->flags |= REQ_F_NEED_CLEANUP;
fddaface
JA
3874 msg.msg_name = NULL;
3875 msg.msg_control = NULL;
3876 msg.msg_controllen = 0;
3877 msg.msg_namelen = 0;
3878 msg.msg_iocb = NULL;
3879 msg.msg_flags = 0;
3880
3881 flags = req->sr_msg.msg_flags;
3882 if (flags & MSG_DONTWAIT)
3883 req->flags |= REQ_F_NOWAIT;
3884 else if (force_nonblock)
3885 flags |= MSG_DONTWAIT;
3886
0b7b21e4 3887 ret = sock_recvmsg(sock, &msg, flags);
fddaface
JA
3888 if (force_nonblock && ret == -EAGAIN)
3889 return -EAGAIN;
3890 if (ret == -ERESTARTSYS)
3891 ret = -EINTR;
3892 }
3893
bcda7baa
JA
3894 kfree(kbuf);
3895 req->flags &= ~REQ_F_NEED_CLEANUP;
3896 __io_cqring_add_event(req, ret, cflags);
fddaface
JA
3897 if (ret < 0)
3898 req_set_fail_links(req);
014db007 3899 io_put_req(req);
fddaface 3900 return 0;
fddaface
JA
3901}
3902
3529d8c2 3903static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
17f2fe35 3904{
8ed8d3c3
JA
3905 struct io_accept *accept = &req->accept;
3906
17f2fe35
JA
3907 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3908 return -EINVAL;
8042d6ce 3909 if (sqe->ioprio || sqe->len || sqe->buf_index)
17f2fe35
JA
3910 return -EINVAL;
3911
d55e5f5b
JA
3912 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
3913 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
8ed8d3c3 3914 accept->flags = READ_ONCE(sqe->accept_flags);
8ed8d3c3 3915 return 0;
8ed8d3c3 3916}
17f2fe35 3917
014db007 3918static int __io_accept(struct io_kiocb *req, bool force_nonblock)
8ed8d3c3
JA
3919{
3920 struct io_accept *accept = &req->accept;
3921 unsigned file_flags;
3922 int ret;
3923
3924 file_flags = force_nonblock ? O_NONBLOCK : 0;
3925 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
3926 accept->addr_len, accept->flags);
3927 if (ret == -EAGAIN && force_nonblock)
17f2fe35 3928 return -EAGAIN;
8e3cca12
JA
3929 if (ret == -ERESTARTSYS)
3930 ret = -EINTR;
4e88d6e7
JA
3931 if (ret < 0)
3932 req_set_fail_links(req);
78e19bbe 3933 io_cqring_add_event(req, ret);
014db007 3934 io_put_req(req);
17f2fe35 3935 return 0;
8ed8d3c3
JA
3936}
3937
3938static void io_accept_finish(struct io_wq_work **workptr)
3939{
3940 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
8ed8d3c3
JA
3941
3942 if (io_req_cancelled(req))
3943 return;
014db007 3944 __io_accept(req, false);
e9fd9396 3945 io_steal_work(req, workptr);
8ed8d3c3 3946}
8ed8d3c3 3947
014db007 3948static int io_accept(struct io_kiocb *req, bool force_nonblock)
8ed8d3c3 3949{
8ed8d3c3
JA
3950 int ret;
3951
014db007 3952 ret = __io_accept(req, force_nonblock);
8ed8d3c3
JA
3953 if (ret == -EAGAIN && force_nonblock) {
3954 req->work.func = io_accept_finish;
8ed8d3c3
JA
3955 return -EAGAIN;
3956 }
3957 return 0;
0fa03c62 3958}
5d17b4a4 3959
3529d8c2 3960static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f499a021 3961{
3529d8c2
JA
3962 struct io_connect *conn = &req->connect;
3963 struct io_async_ctx *io = req->io;
f499a021 3964
3fbb51c1
JA
3965 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3966 return -EINVAL;
3967 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
3968 return -EINVAL;
3969
3529d8c2
JA
3970 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
3971 conn->addr_len = READ_ONCE(sqe->addr2);
3972
3973 if (!io)
3974 return 0;
3975
3976 return move_addr_to_kernel(conn->addr, conn->addr_len,
3fbb51c1 3977 &io->connect.address);
f499a021
JA
3978}
3979
014db007 3980static int io_connect(struct io_kiocb *req, bool force_nonblock)
f8e85cf2 3981{
f499a021 3982 struct io_async_ctx __io, *io;
f8e85cf2 3983 unsigned file_flags;
3fbb51c1 3984 int ret;
f8e85cf2 3985
f499a021
JA
3986 if (req->io) {
3987 io = req->io;
3988 } else {
3529d8c2
JA
3989 ret = move_addr_to_kernel(req->connect.addr,
3990 req->connect.addr_len,
3991 &__io.connect.address);
f499a021
JA
3992 if (ret)
3993 goto out;
3994 io = &__io;
3995 }
3996
3fbb51c1
JA
3997 file_flags = force_nonblock ? O_NONBLOCK : 0;
3998
3999 ret = __sys_connect_file(req->file, &io->connect.address,
4000 req->connect.addr_len, file_flags);
87f80d62 4001 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
b7bb4f7d
JA
4002 if (req->io)
4003 return -EAGAIN;
4004 if (io_alloc_async_ctx(req)) {
f499a021
JA
4005 ret = -ENOMEM;
4006 goto out;
4007 }
b7bb4f7d 4008 memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect));
f8e85cf2 4009 return -EAGAIN;
f499a021 4010 }
f8e85cf2
JA
4011 if (ret == -ERESTARTSYS)
4012 ret = -EINTR;
f499a021 4013out:
4e88d6e7
JA
4014 if (ret < 0)
4015 req_set_fail_links(req);
f8e85cf2 4016 io_cqring_add_event(req, ret);
014db007 4017 io_put_req(req);
f8e85cf2 4018 return 0;
469956e8
Y
4019}
4020#else /* !CONFIG_NET */
4021static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4022{
4023 return -EOPNOTSUPP;
4024}
4025
4026static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
4027{
4028 return -EOPNOTSUPP;
4029}
4030
4031static int io_send(struct io_kiocb *req, bool force_nonblock)
4032{
4033 return -EOPNOTSUPP;
4034}
4035
4036static int io_recvmsg_prep(struct io_kiocb *req,
4037 const struct io_uring_sqe *sqe)
4038{
4039 return -EOPNOTSUPP;
4040}
4041
4042static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
4043{
4044 return -EOPNOTSUPP;
4045}
4046
4047static int io_recv(struct io_kiocb *req, bool force_nonblock)
4048{
4049 return -EOPNOTSUPP;
4050}
4051
4052static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4053{
4054 return -EOPNOTSUPP;
4055}
4056
4057static int io_accept(struct io_kiocb *req, bool force_nonblock)
4058{
4059 return -EOPNOTSUPP;
4060}
4061
4062static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4063{
4064 return -EOPNOTSUPP;
4065}
4066
4067static int io_connect(struct io_kiocb *req, bool force_nonblock)
4068{
f8e85cf2 4069 return -EOPNOTSUPP;
f8e85cf2 4070}
469956e8 4071#endif /* CONFIG_NET */
f8e85cf2 4072
d7718a9d
JA
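/*
 * Poll plumbing shared by IORING_OP_POLL_ADD and the internal fast-poll
 * path: vfs_poll() invokes the queue proc below, which records the wait
 * queue head and adds our wait entry to it. Only a single wait queue per
 * file is supported here; a second head is flagged as -EINVAL.
 */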
4073struct io_poll_table {
4074 struct poll_table_struct pt;
4075 struct io_kiocb *req;
4076 int error;
4077};
4078
4079static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
4080 struct wait_queue_head *head)
4081{
4082 if (unlikely(poll->head)) {
4083 pt->error = -EINVAL;
4084 return;
4085 }
4086
4087 pt->error = 0;
4088 poll->head = head;
4089 add_wait_queue(head, &poll->wait);
4090}
4091
4092static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
4093 struct poll_table_struct *p)
4094{
4095 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4096
4097 __io_queue_proc(&pt->req->apoll->poll, pt, head);
4098}
4099
4100static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4101 __poll_t mask, task_work_func_t func)
4102{
4103 struct task_struct *tsk;
4104
4106	/* for instances that support it, check for an event match first */
4106 if (mask && !(mask & poll->events))
4107 return 0;
4108
4109 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4110
4111 list_del_init(&poll->wait.entry);
4112
4113 tsk = req->task;
4114 req->result = mask;
4115 init_task_work(&req->task_work, func);
4116 /*
4117 * If this fails, then the task is exiting. If that is the case, then
4118 * the exit check will ultimately cancel these work items. Hence we
4119 * don't need to check here and handle it specifically.
4120 */
4121 task_work_add(tsk, &req->task_work, true);
4122 wake_up_process(tsk);
4123 return 1;
4124}
4125
4126static void io_async_task_func(struct callback_head *cb)
4127{
4128 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4129 struct async_poll *apoll = req->apoll;
4130 struct io_ring_ctx *ctx = req->ctx;
4131
4132 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
4133
4134 WARN_ON_ONCE(!list_empty(&req->apoll->poll.wait.entry));
4135
4136 if (hash_hashed(&req->hash_node)) {
4137 spin_lock_irq(&ctx->completion_lock);
4138 hash_del(&req->hash_node);
4139 spin_unlock_irq(&ctx->completion_lock);
4140 }
4141
4142 /* restore ->work in case we need to retry again */
4143 memcpy(&req->work, &apoll->work, sizeof(req->work));
4144
4145 __set_current_state(TASK_RUNNING);
4146 mutex_lock(&ctx->uring_lock);
4147 __io_queue_sqe(req, NULL);
4148 mutex_unlock(&ctx->uring_lock);
4149
4150 kfree(apoll);
4151}
4152
4153static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4154 void *key)
4155{
4156 struct io_kiocb *req = wait->private;
4157 struct io_poll_iocb *poll = &req->apoll->poll;
4158
4159 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
4160 key_to_poll(key));
4161
4162 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
4163}
4164
4165static void io_poll_req_insert(struct io_kiocb *req)
4166{
4167 struct io_ring_ctx *ctx = req->ctx;
4168 struct hlist_head *list;
4169
4170 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
4171 hlist_add_head(&req->hash_node, list);
4172}
4173
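/*
 * Arm a poll handler under ->completion_lock. If vfs_poll() already
 * reports a ready mask, the wait entry is removed again and the caller
 * completes the request inline; otherwise the request is inserted into
 * the cancel hash so it can be found and cancelled later.
 */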
4174static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
4175 struct io_poll_iocb *poll,
4176 struct io_poll_table *ipt, __poll_t mask,
4177 wait_queue_func_t wake_func)
4178 __acquires(&ctx->completion_lock)
4179{
4180 struct io_ring_ctx *ctx = req->ctx;
4181 bool cancel = false;
4182
4183 poll->file = req->file;
4184 poll->head = NULL;
4185 poll->done = poll->canceled = false;
4186 poll->events = mask;
4187
4188 ipt->pt._key = mask;
4189 ipt->req = req;
4190 ipt->error = -EINVAL;
4191
4192 INIT_LIST_HEAD(&poll->wait.entry);
4193 init_waitqueue_func_entry(&poll->wait, wake_func);
4194 poll->wait.private = req;
4195
4196 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
4197
4198 spin_lock_irq(&ctx->completion_lock);
4199 if (likely(poll->head)) {
4200 spin_lock(&poll->head->lock);
4201 if (unlikely(list_empty(&poll->wait.entry))) {
4202 if (ipt->error)
4203 cancel = true;
4204 ipt->error = 0;
4205 mask = 0;
4206 }
4207 if (mask || ipt->error)
4208 list_del_init(&poll->wait.entry);
4209 else if (cancel)
4210 WRITE_ONCE(poll->canceled, true);
4211 else if (!poll->done) /* actually waiting for an event */
4212 io_poll_req_insert(req);
4213 spin_unlock(&poll->head->lock);
4214 }
4215
4216 return mask;
4217}
4218
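/*
 * Fast poll for regular requests: rather than punting a would-block
 * request to io-wq, arm an internal poll handler on the file. When it
 * signals readiness, io_async_task_func() re-issues the request from
 * task_work context.
 */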
4219static bool io_arm_poll_handler(struct io_kiocb *req)
4220{
4221 const struct io_op_def *def = &io_op_defs[req->opcode];
4222 struct io_ring_ctx *ctx = req->ctx;
4223 struct async_poll *apoll;
4224 struct io_poll_table ipt;
4225 __poll_t mask, ret;
4226
4227 if (!req->file || !file_can_poll(req->file))
4228 return false;
4229 if (req->flags & (REQ_F_MUST_PUNT | REQ_F_POLLED))
4230 return false;
4231 if (!def->pollin && !def->pollout)
4232 return false;
4233
4234 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
4235 if (unlikely(!apoll))
4236 return false;
4237
4238 req->flags |= REQ_F_POLLED;
4239 memcpy(&apoll->work, &req->work, sizeof(req->work));
4240
4241 /*
4242	 * Don't need a reference here, as we're adding it to the task's
4243	 * task_works list. If the task exits, the list is pruned.
4244 */
4245 req->task = current;
4246 req->apoll = apoll;
4247 INIT_HLIST_NODE(&req->hash_node);
4248
8755d97a 4249 mask = 0;
d7718a9d 4250 if (def->pollin)
8755d97a 4251 mask |= POLLIN | POLLRDNORM;
d7718a9d
JA
4252 if (def->pollout)
4253 mask |= POLLOUT | POLLWRNORM;
4254 mask |= POLLERR | POLLPRI;
4255
4256 ipt.pt._qproc = io_async_queue_proc;
4257
4258 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
4259 io_async_wake);
4260 if (ret) {
4261 ipt.error = 0;
4262 apoll->poll.done = true;
4263 spin_unlock_irq(&ctx->completion_lock);
4264 memcpy(&req->work, &apoll->work, sizeof(req->work));
4265 kfree(apoll);
4266 return false;
4267 }
4268 spin_unlock_irq(&ctx->completion_lock);
4269 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
4270 apoll->poll.events);
4271 return true;
4272}
4273
4274static bool __io_poll_remove_one(struct io_kiocb *req,
4275 struct io_poll_iocb *poll)
221c5eb2 4276{
b41e9852 4277 bool do_complete = false;
221c5eb2
JA
4278
4279 spin_lock(&poll->head->lock);
4280 WRITE_ONCE(poll->canceled, true);
392edb45
JA
4281 if (!list_empty(&poll->wait.entry)) {
4282 list_del_init(&poll->wait.entry);
b41e9852 4283 do_complete = true;
221c5eb2
JA
4284 }
4285 spin_unlock(&poll->head->lock);
d7718a9d
JA
4286 return do_complete;
4287}
4288
4289static bool io_poll_remove_one(struct io_kiocb *req)
4290{
4291 bool do_complete;
4292
4293 if (req->opcode == IORING_OP_POLL_ADD) {
4294 do_complete = __io_poll_remove_one(req, &req->poll);
4295 } else {
4296 /* non-poll requests have submit ref still */
4297 do_complete = __io_poll_remove_one(req, &req->apoll->poll);
4298 if (do_complete)
4299 io_put_req(req);
4300 }
4301
78076bb6 4302 hash_del(&req->hash_node);
d7718a9d 4303
b41e9852
JA
4304 if (do_complete) {
4305 io_cqring_fill_event(req, -ECANCELED);
4306 io_commit_cqring(req->ctx);
4307 req->flags |= REQ_F_COMP_LOCKED;
4308 io_put_req(req);
4309 }
4310
4311 return do_complete;
221c5eb2
JA
4312}
4313
4314static void io_poll_remove_all(struct io_ring_ctx *ctx)
4315{
78076bb6 4316 struct hlist_node *tmp;
221c5eb2 4317 struct io_kiocb *req;
78076bb6 4318 int i;
221c5eb2
JA
4319
4320 spin_lock_irq(&ctx->completion_lock);
78076bb6
JA
4321 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
4322 struct hlist_head *list;
4323
4324 list = &ctx->cancel_hash[i];
4325 hlist_for_each_entry_safe(req, tmp, list, hash_node)
4326 io_poll_remove_one(req);
221c5eb2
JA
4327 }
4328 spin_unlock_irq(&ctx->completion_lock);
b41e9852
JA
4329
4330 io_cqring_ev_posted(ctx);
221c5eb2
JA
4331}
4332
47f46768
JA
4333static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
4334{
78076bb6 4335 struct hlist_head *list;
47f46768
JA
4336 struct io_kiocb *req;
4337
78076bb6
JA
4338 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
4339 hlist_for_each_entry(req, list, hash_node) {
b41e9852
JA
4340 if (sqe_addr != req->user_data)
4341 continue;
4342 if (io_poll_remove_one(req))
eac406c6 4343 return 0;
b41e9852 4344 return -EALREADY;
47f46768
JA
4345 }
4346
4347 return -ENOENT;
4348}
4349
3529d8c2
JA
4350static int io_poll_remove_prep(struct io_kiocb *req,
4351 const struct io_uring_sqe *sqe)
0969e783 4352{
0969e783
JA
4353 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4354 return -EINVAL;
4355 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
4356 sqe->poll_events)
4357 return -EINVAL;
4358
4359 req->poll.addr = READ_ONCE(sqe->addr);
0969e783
JA
4360 return 0;
4361}
4362
221c5eb2
JA
4363/*
4364 * Find a running poll command that matches one specified in sqe->addr,
4365 * and remove it if found.
4366 */
fc4df999 4367static int io_poll_remove(struct io_kiocb *req)
221c5eb2
JA
4368{
4369 struct io_ring_ctx *ctx = req->ctx;
0969e783 4370 u64 addr;
47f46768 4371 int ret;
221c5eb2 4372
0969e783 4373 addr = req->poll.addr;
221c5eb2 4374 spin_lock_irq(&ctx->completion_lock);
0969e783 4375 ret = io_poll_cancel(ctx, addr);
221c5eb2
JA
4376 spin_unlock_irq(&ctx->completion_lock);
4377
78e19bbe 4378 io_cqring_add_event(req, ret);
4e88d6e7
JA
4379 if (ret < 0)
4380 req_set_fail_links(req);
e65ef56d 4381 io_put_req(req);
221c5eb2
JA
4382 return 0;
4383}
4384
b0dd8a41 4385static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
221c5eb2 4386{
a197f664
JL
4387 struct io_ring_ctx *ctx = req->ctx;
4388
8c838788 4389 req->poll.done = true;
b0a20349 4390 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
8c838788 4391 io_commit_cqring(ctx);
221c5eb2
JA
4392}
4393
b41e9852 4394static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
221c5eb2 4395{
221c5eb2 4396 struct io_ring_ctx *ctx = req->ctx;
221c5eb2 4397
221c5eb2 4398 spin_lock_irq(&ctx->completion_lock);
78076bb6 4399 hash_del(&req->hash_node);
b41e9852
JA
4400 io_poll_complete(req, req->result, 0);
4401 req->flags |= REQ_F_COMP_LOCKED;
4402 io_put_req_find_next(req, nxt);
e94f141b
JA
4403 spin_unlock_irq(&ctx->completion_lock);
4404
4405 io_cqring_ev_posted(ctx);
e94f141b
JA
4406}
4407
b41e9852 4408static void io_poll_task_func(struct callback_head *cb)
f0b493e6 4409{
b41e9852
JA
4410 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4411 struct io_kiocb *nxt = NULL;
f0b493e6 4412
b41e9852 4413 io_poll_task_handler(req, &nxt);
d7718a9d
JA
4414 if (nxt) {
4415 struct io_ring_ctx *ctx = nxt->ctx;
4416
4417 mutex_lock(&ctx->uring_lock);
b41e9852 4418 __io_queue_sqe(nxt, NULL);
d7718a9d
JA
4419 mutex_unlock(&ctx->uring_lock);
4420 }
f0b493e6
JA
4421}
4422
221c5eb2
JA
4423static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4424 void *key)
4425{
c2f2eb7d
JA
4426 struct io_kiocb *req = wait->private;
4427 struct io_poll_iocb *poll = &req->poll;
221c5eb2 4428
d7718a9d 4429 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
221c5eb2
JA
4430}
4431
221c5eb2
JA
4432static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
4433 struct poll_table_struct *p)
4434{
4435 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4436
d7718a9d 4437 __io_queue_proc(&pt->req->poll, pt, head);
eac406c6
JA
4438}
4439
3529d8c2 4440static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
4441{
4442 struct io_poll_iocb *poll = &req->poll;
221c5eb2 4443 u16 events;
221c5eb2
JA
4444
4445 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4446 return -EINVAL;
4447 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
4448 return -EINVAL;
09bb8394
JA
4449 if (!poll->file)
4450 return -EBADF;
221c5eb2 4451
221c5eb2
JA
4452 events = READ_ONCE(sqe->poll_events);
4453 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
b41e9852 4454
d7718a9d
JA
4455 /*
4456	 * Don't need a reference here, as we're adding it to the task's
4457	 * task_works list. If the task exits, the list is pruned.
4458 */
b41e9852 4459 req->task = current;
0969e783
JA
4460 return 0;
4461}
4462
014db007 4463static int io_poll_add(struct io_kiocb *req)
0969e783
JA
4464{
4465 struct io_poll_iocb *poll = &req->poll;
4466 struct io_ring_ctx *ctx = req->ctx;
4467 struct io_poll_table ipt;
0969e783 4468 __poll_t mask;
0969e783 4469
78076bb6 4470 INIT_HLIST_NODE(&req->hash_node);
36703247 4471 INIT_LIST_HEAD(&req->list);
d7718a9d 4472 ipt.pt._qproc = io_poll_queue_proc;
36703247 4473
d7718a9d
JA
4474 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
4475 io_poll_wake);
221c5eb2 4476
8c838788 4477 if (mask) { /* no async, we'd stolen it */
221c5eb2 4478 ipt.error = 0;
b0dd8a41 4479 io_poll_complete(req, mask, 0);
221c5eb2 4480 }
221c5eb2
JA
4481 spin_unlock_irq(&ctx->completion_lock);
4482
8c838788
JA
4483 if (mask) {
4484 io_cqring_ev_posted(ctx);
014db007 4485 io_put_req(req);
221c5eb2 4486 }
8c838788 4487 return ipt.error;
221c5eb2
JA
4488}
4489
5262f567
JA
4490static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
4491{
ad8a48ac
JA
4492 struct io_timeout_data *data = container_of(timer,
4493 struct io_timeout_data, timer);
4494 struct io_kiocb *req = data->req;
4495 struct io_ring_ctx *ctx = req->ctx;
5262f567
JA
4496 unsigned long flags;
4497
5262f567
JA
4498 atomic_inc(&ctx->cq_timeouts);
4499
4500 spin_lock_irqsave(&ctx->completion_lock, flags);
ef03681a 4501 /*
4502 * We could be racing with timeout deletion. If the list is empty,
4503 * then timeout lookup already found it and will be handling it.
ef03681a 4504 */
842f9612 4505 if (!list_empty(&req->list)) {
11365043 4506 struct io_kiocb *prev;
5262f567 4507
11365043
JA
4508 /*
4509	 * Adjust the sequence of the requests before the current one, because
d195a66e 4510	 * this timeout will consume a slot in the cq_ring and the cq_tail
4511	 * pointer will be increased; otherwise other timeout reqs may
4512	 * return in advance without waiting for enough wait_nr.
4513 */
4514 prev = req;
4515 list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
4516 prev->sequence++;
11365043 4517 list_del_init(&req->list);
11365043 4518 }
5262f567 4519
78e19bbe 4520 io_cqring_fill_event(req, -ETIME);
5262f567
JA
4521 io_commit_cqring(ctx);
4522 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4523
4524 io_cqring_ev_posted(ctx);
4e88d6e7 4525 req_set_fail_links(req);
5262f567
JA
4526 io_put_req(req);
4527 return HRTIMER_NORESTART;
4528}
4529
47f46768
JA
4530static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
4531{
4532 struct io_kiocb *req;
4533 int ret = -ENOENT;
4534
4535 list_for_each_entry(req, &ctx->timeout_list, list) {
4536 if (user_data == req->user_data) {
4537 list_del_init(&req->list);
4538 ret = 0;
4539 break;
4540 }
4541 }
4542
4543 if (ret == -ENOENT)
4544 return ret;
4545
2d28390a 4546 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
47f46768
JA
4547 if (ret == -1)
4548 return -EALREADY;
4549
4e88d6e7 4550 req_set_fail_links(req);
47f46768
JA
4551 io_cqring_fill_event(req, -ECANCELED);
4552 io_put_req(req);
4553 return 0;
4554}
4555
3529d8c2
JA
4556static int io_timeout_remove_prep(struct io_kiocb *req,
4557 const struct io_uring_sqe *sqe)
b29472ee 4558{
b29472ee
JA
4559 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4560 return -EINVAL;
4561 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
4562 return -EINVAL;
4563
4564 req->timeout.addr = READ_ONCE(sqe->addr);
4565 req->timeout.flags = READ_ONCE(sqe->timeout_flags);
4566 if (req->timeout.flags)
4567 return -EINVAL;
4568
b29472ee
JA
4569 return 0;
4570}
4571
11365043
JA
4572/*
4573 * Remove or update an existing timeout command
4574 */
fc4df999 4575static int io_timeout_remove(struct io_kiocb *req)
11365043
JA
4576{
4577 struct io_ring_ctx *ctx = req->ctx;
47f46768 4578 int ret;
11365043 4579
11365043 4580 spin_lock_irq(&ctx->completion_lock);
b29472ee 4581 ret = io_timeout_cancel(ctx, req->timeout.addr);
11365043 4582
47f46768 4583 io_cqring_fill_event(req, ret);
11365043
JA
4584 io_commit_cqring(ctx);
4585 spin_unlock_irq(&ctx->completion_lock);
5262f567 4586 io_cqring_ev_posted(ctx);
4e88d6e7
JA
4587 if (ret < 0)
4588 req_set_fail_links(req);
ec9c02ad 4589 io_put_req(req);
11365043 4590 return 0;
5262f567
JA
4591}
4592
3529d8c2 4593static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2d28390a 4594 bool is_timeout_link)
5262f567 4595{
ad8a48ac 4596 struct io_timeout_data *data;
a41525ab 4597 unsigned flags;
5262f567 4598
ad8a48ac 4599 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5262f567 4600 return -EINVAL;
ad8a48ac 4601 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
a41525ab 4602 return -EINVAL;
2d28390a
JA
4603 if (sqe->off && is_timeout_link)
4604 return -EINVAL;
a41525ab
JA
4605 flags = READ_ONCE(sqe->timeout_flags);
4606 if (flags & ~IORING_TIMEOUT_ABS)
5262f567 4607 return -EINVAL;
bdf20073 4608
26a61679
JA
4609 req->timeout.count = READ_ONCE(sqe->off);
4610
3529d8c2 4611 if (!req->io && io_alloc_async_ctx(req))
26a61679
JA
4612 return -ENOMEM;
4613
4614 data = &req->io->timeout;
ad8a48ac 4615 data->req = req;
ad8a48ac
JA
4616 req->flags |= REQ_F_TIMEOUT;
4617
4618 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
4619 return -EFAULT;
4620
11365043 4621 if (flags & IORING_TIMEOUT_ABS)
ad8a48ac 4622 data->mode = HRTIMER_MODE_ABS;
11365043 4623 else
ad8a48ac 4624 data->mode = HRTIMER_MODE_REL;
11365043 4625
ad8a48ac
JA
4626 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
4627 return 0;
4628}
4629
fc4df999 4630static int io_timeout(struct io_kiocb *req)
ad8a48ac
JA
4631{
4632 unsigned count;
4633 struct io_ring_ctx *ctx = req->ctx;
4634 struct io_timeout_data *data;
4635 struct list_head *entry;
4636 unsigned span = 0;
ad8a48ac 4637
2d28390a 4638 data = &req->io->timeout;
93bd25bb 4639
5262f567
JA
4640 /*
4641	 * sqe->off holds how many events need to occur for this
4642	 * timeout event to be satisfied. If it isn't set, then this is
4643	 * a pure timeout request and the sequence isn't used.
5262f567 4644 */
26a61679 4645 count = req->timeout.count;
93bd25bb
JA
4646 if (!count) {
4647 req->flags |= REQ_F_TIMEOUT_NOSEQ;
4648 spin_lock_irq(&ctx->completion_lock);
4649 entry = ctx->timeout_list.prev;
4650 goto add;
4651 }
5262f567
JA
4652
4653 req->sequence = ctx->cached_sq_head + count - 1;
2d28390a 4654 data->seq_offset = count;
5262f567
JA
4655
4656 /*
4657 * Insertion sort, ensuring the first entry in the list is always
4658 * the one we need first.
4659 */
5262f567
JA
4660 spin_lock_irq(&ctx->completion_lock);
4661 list_for_each_prev(entry, &ctx->timeout_list) {
4662 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
5da0fb1a 4663 unsigned nxt_sq_head;
4664 long long tmp, tmp_nxt;
2d28390a 4665 u32 nxt_offset = nxt->io->timeout.seq_offset;
5262f567 4666
93bd25bb
JA
4667 if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
4668 continue;
4669
5da0fb1a 4670 /*
4671 * Since cached_sq_head + count - 1 can overflow, use type long
4672 * long to store it.
4673 */
4674 tmp = (long long)ctx->cached_sq_head + count - 1;
cc42e0ac
PB
4675 nxt_sq_head = nxt->sequence - nxt_offset + 1;
4676 tmp_nxt = (long long)nxt_sq_head + nxt_offset - 1;
5da0fb1a 4677
4678 /*
4679	 * cached_sq_head may overflow, but it will never overflow twice
4680	 * while there is still a valid timeout req pending.
4681 */
4682 if (ctx->cached_sq_head < nxt_sq_head)
8b07a65a 4683 tmp += UINT_MAX;
5da0fb1a 4684
a1f58ba4 4685 if (tmp > tmp_nxt)
5262f567 4686 break;
a1f58ba4 4687
4688 /*
4689	 * The sequence of the inserted req, and of the reqs after it, should
4690	 * be adjusted because each timeout req consumes a slot.
4691 */
4692 span++;
4693 nxt->sequence++;
5262f567 4694 }
a1f58ba4 4695 req->sequence -= span;
93bd25bb 4696add:
5262f567 4697 list_add(&req->list, entry);
ad8a48ac
JA
4698 data->timer.function = io_timeout_fn;
4699 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5262f567 4700 spin_unlock_irq(&ctx->completion_lock);
5262f567
JA
4701 return 0;
4702}
5262f567 4703
62755e35
JA
4704static bool io_cancel_cb(struct io_wq_work *work, void *data)
4705{
4706 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
4707
4708 return req->user_data == (unsigned long) data;
4709}
4710
e977d6d3 4711static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
62755e35 4712{
62755e35 4713 enum io_wq_cancel cancel_ret;
62755e35
JA
4714 int ret = 0;
4715
62755e35
JA
4716 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
4717 switch (cancel_ret) {
4718 case IO_WQ_CANCEL_OK:
4719 ret = 0;
4720 break;
4721 case IO_WQ_CANCEL_RUNNING:
4722 ret = -EALREADY;
4723 break;
4724 case IO_WQ_CANCEL_NOTFOUND:
4725 ret = -ENOENT;
4726 break;
4727 }
4728
e977d6d3
JA
4729 return ret;
4730}
4731
47f46768
JA
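/*
 * Cancellation lookup order: try the io-wq queue first, then pending
 * timeouts, then armed poll requests, posting the first non -ENOENT
 * outcome (or success_ret on success) as the requesting io_kiocb's
 * CQE result.
 */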
4732static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
4733 struct io_kiocb *req, __u64 sqe_addr,
014db007 4734 int success_ret)
47f46768
JA
4735{
4736 unsigned long flags;
4737 int ret;
4738
4739 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
4740 if (ret != -ENOENT) {
4741 spin_lock_irqsave(&ctx->completion_lock, flags);
4742 goto done;
4743 }
4744
4745 spin_lock_irqsave(&ctx->completion_lock, flags);
4746 ret = io_timeout_cancel(ctx, sqe_addr);
4747 if (ret != -ENOENT)
4748 goto done;
4749 ret = io_poll_cancel(ctx, sqe_addr);
4750done:
b0dd8a41
JA
4751 if (!ret)
4752 ret = success_ret;
47f46768
JA
4753 io_cqring_fill_event(req, ret);
4754 io_commit_cqring(ctx);
4755 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4756 io_cqring_ev_posted(ctx);
4757
4e88d6e7
JA
4758 if (ret < 0)
4759 req_set_fail_links(req);
014db007 4760 io_put_req(req);
47f46768
JA
4761}
4762
3529d8c2
JA
4763static int io_async_cancel_prep(struct io_kiocb *req,
4764 const struct io_uring_sqe *sqe)
e977d6d3 4765{
fbf23849 4766 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
e977d6d3
JA
4767 return -EINVAL;
4768 if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
4769 sqe->cancel_flags)
4770 return -EINVAL;
4771
fbf23849
JA
4772 req->cancel.addr = READ_ONCE(sqe->addr);
4773 return 0;
4774}
4775
014db007 4776static int io_async_cancel(struct io_kiocb *req)
fbf23849
JA
4777{
4778 struct io_ring_ctx *ctx = req->ctx;
fbf23849 4779
014db007 4780 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
5262f567
JA
4781 return 0;
4782}
4783
05f3fb3c
JA
4784static int io_files_update_prep(struct io_kiocb *req,
4785 const struct io_uring_sqe *sqe)
4786{
4787 if (sqe->flags || sqe->ioprio || sqe->rw_flags)
4788 return -EINVAL;
4789
4790 req->files_update.offset = READ_ONCE(sqe->off);
4791 req->files_update.nr_args = READ_ONCE(sqe->len);
4792 if (!req->files_update.nr_args)
4793 return -EINVAL;
4794 req->files_update.arg = READ_ONCE(sqe->addr);
4795 return 0;
4796}
4797
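/*
 * IORING_OP_FILES_UPDATE modifies the registered file table and needs
 * ->uring_lock, so it is never run from the non-blocking submission
 * path; -EAGAIN forces execution from io-wq context instead.
 */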
4798static int io_files_update(struct io_kiocb *req, bool force_nonblock)
fbf23849
JA
4799{
4800 struct io_ring_ctx *ctx = req->ctx;
05f3fb3c
JA
4801 struct io_uring_files_update up;
4802 int ret;
fbf23849 4803
f86cd20c 4804 if (force_nonblock)
05f3fb3c 4805 return -EAGAIN;
05f3fb3c
JA
4806
4807 up.offset = req->files_update.offset;
4808 up.fds = req->files_update.arg;
4809
4810 mutex_lock(&ctx->uring_lock);
4811 ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
4812 mutex_unlock(&ctx->uring_lock);
4813
4814 if (ret < 0)
4815 req_set_fail_links(req);
4816 io_cqring_add_event(req, ret);
4817 io_put_req(req);
5262f567
JA
4818 return 0;
4819}
4820
3529d8c2
JA
4821static int io_req_defer_prep(struct io_kiocb *req,
4822 const struct io_uring_sqe *sqe)
f67676d1 4823{
e781573e 4824 ssize_t ret = 0;
f67676d1 4825
f86cd20c
JA
4826 if (io_op_defs[req->opcode].file_table) {
4827 ret = io_grab_files(req);
4828 if (unlikely(ret))
4829 return ret;
4830 }
4831
cccf0ee8
JA
4832 io_req_work_grab_env(req, &io_op_defs[req->opcode]);
4833
d625c6ee 4834 switch (req->opcode) {
e781573e
JA
4835 case IORING_OP_NOP:
4836 break;
f67676d1
JA
4837 case IORING_OP_READV:
4838 case IORING_OP_READ_FIXED:
3a6820f2 4839 case IORING_OP_READ:
3529d8c2 4840 ret = io_read_prep(req, sqe, true);
f67676d1
JA
4841 break;
4842 case IORING_OP_WRITEV:
4843 case IORING_OP_WRITE_FIXED:
3a6820f2 4844 case IORING_OP_WRITE:
3529d8c2 4845 ret = io_write_prep(req, sqe, true);
f67676d1 4846 break;
0969e783 4847 case IORING_OP_POLL_ADD:
3529d8c2 4848 ret = io_poll_add_prep(req, sqe);
0969e783
JA
4849 break;
4850 case IORING_OP_POLL_REMOVE:
3529d8c2 4851 ret = io_poll_remove_prep(req, sqe);
0969e783 4852 break;
8ed8d3c3 4853 case IORING_OP_FSYNC:
3529d8c2 4854 ret = io_prep_fsync(req, sqe);
8ed8d3c3
JA
4855 break;
4856 case IORING_OP_SYNC_FILE_RANGE:
3529d8c2 4857 ret = io_prep_sfr(req, sqe);
8ed8d3c3 4858 break;
03b1230c 4859 case IORING_OP_SENDMSG:
fddaface 4860 case IORING_OP_SEND:
3529d8c2 4861 ret = io_sendmsg_prep(req, sqe);
03b1230c
JA
4862 break;
4863 case IORING_OP_RECVMSG:
fddaface 4864 case IORING_OP_RECV:
3529d8c2 4865 ret = io_recvmsg_prep(req, sqe);
03b1230c 4866 break;
f499a021 4867 case IORING_OP_CONNECT:
3529d8c2 4868 ret = io_connect_prep(req, sqe);
f499a021 4869 break;
2d28390a 4870 case IORING_OP_TIMEOUT:
3529d8c2 4871 ret = io_timeout_prep(req, sqe, false);
b7bb4f7d 4872 break;
b29472ee 4873 case IORING_OP_TIMEOUT_REMOVE:
3529d8c2 4874 ret = io_timeout_remove_prep(req, sqe);
b29472ee 4875 break;
fbf23849 4876 case IORING_OP_ASYNC_CANCEL:
3529d8c2 4877 ret = io_async_cancel_prep(req, sqe);
fbf23849 4878 break;
2d28390a 4879 case IORING_OP_LINK_TIMEOUT:
3529d8c2 4880 ret = io_timeout_prep(req, sqe, true);
b7bb4f7d 4881 break;
8ed8d3c3 4882 case IORING_OP_ACCEPT:
3529d8c2 4883 ret = io_accept_prep(req, sqe);
8ed8d3c3 4884 break;
d63d1b5e
JA
4885 case IORING_OP_FALLOCATE:
4886 ret = io_fallocate_prep(req, sqe);
4887 break;
15b71abe
JA
4888 case IORING_OP_OPENAT:
4889 ret = io_openat_prep(req, sqe);
4890 break;
b5dba59e
JA
4891 case IORING_OP_CLOSE:
4892 ret = io_close_prep(req, sqe);
4893 break;
05f3fb3c
JA
4894 case IORING_OP_FILES_UPDATE:
4895 ret = io_files_update_prep(req, sqe);
4896 break;
eddc7ef5
JA
4897 case IORING_OP_STATX:
4898 ret = io_statx_prep(req, sqe);
4899 break;
4840e418
JA
4900 case IORING_OP_FADVISE:
4901 ret = io_fadvise_prep(req, sqe);
4902 break;
c1ca757b
JA
4903 case IORING_OP_MADVISE:
4904 ret = io_madvise_prep(req, sqe);
4905 break;
cebdb986
JA
4906 case IORING_OP_OPENAT2:
4907 ret = io_openat2_prep(req, sqe);
4908 break;
3e4827b0
JA
4909 case IORING_OP_EPOLL_CTL:
4910 ret = io_epoll_ctl_prep(req, sqe);
4911 break;
7d67af2c
PB
4912 case IORING_OP_SPLICE:
4913 ret = io_splice_prep(req, sqe);
4914 break;
ddf0322d
JA
4915 case IORING_OP_PROVIDE_BUFFERS:
4916 ret = io_provide_buffers_prep(req, sqe);
4917 break;
067524e9
JA
4918 case IORING_OP_REMOVE_BUFFERS:
4919 ret = io_remove_buffers_prep(req, sqe);
4920 break;
f67676d1 4921 default:
e781573e
JA
4922 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
4923 req->opcode);
4924 ret = -EINVAL;
b7bb4f7d 4925 break;
f67676d1
JA
4926 }
4927
b7bb4f7d 4928 return ret;
f67676d1
JA
4929}
4930
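/*
 * Defer path: if the request needs to be drained (IOSQE_IO_DRAIN) or
 * earlier requests are already sitting on ->defer_list, prep it fully
 * now (the SQE may be reused by the application once submission
 * returns) and park it on ->defer_list until prior requests complete.
 */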
3529d8c2 4931static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
de0617e4 4932{
a197f664 4933 struct io_ring_ctx *ctx = req->ctx;
f67676d1 4934 int ret;
de0617e4 4935
9d858b21
BL
4936 /* Still need defer if there is pending req in defer list. */
4937 if (!req_need_defer(req) && list_empty(&ctx->defer_list))
de0617e4
JA
4938 return 0;
4939
3529d8c2 4940 if (!req->io && io_alloc_async_ctx(req))
de0617e4
JA
4941 return -EAGAIN;
4942
3529d8c2 4943 ret = io_req_defer_prep(req, sqe);
b7bb4f7d 4944 if (ret < 0)
2d28390a 4945 return ret;
2d28390a 4946
de0617e4 4947 spin_lock_irq(&ctx->completion_lock);
9d858b21 4948 if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
de0617e4 4949 spin_unlock_irq(&ctx->completion_lock);
de0617e4
JA
4950 return 0;
4951 }
4952
915967f6 4953 trace_io_uring_defer(ctx, req, req->user_data);
de0617e4
JA
4954 list_add_tail(&req->list, &ctx->defer_list);
4955 spin_unlock_irq(&ctx->completion_lock);
4956 return -EIOCBQUEUED;
4957}
4958
99bc4c38
PB
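/*
 * Release resources that prep handlers attached to the request (copied
 * iovecs, selected buffers, open filenames, spliced files). Requests
 * owning such state carry REQ_F_NEED_CLEANUP until it is dropped here
 * or consumed by the normal completion path.
 */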
4959static void io_cleanup_req(struct io_kiocb *req)
4960{
4961 struct io_async_ctx *io = req->io;
4962
4963 switch (req->opcode) {
4964 case IORING_OP_READV:
4965 case IORING_OP_READ_FIXED:
4966 case IORING_OP_READ:
bcda7baa
JA
4967 if (req->flags & REQ_F_BUFFER_SELECTED)
4968 kfree((void *)(unsigned long)req->rw.addr);
4969 /* fallthrough */
99bc4c38
PB
4970 case IORING_OP_WRITEV:
4971 case IORING_OP_WRITE_FIXED:
4972 case IORING_OP_WRITE:
4973 if (io->rw.iov != io->rw.fast_iov)
4974 kfree(io->rw.iov);
4975 break;
99bc4c38 4976 case IORING_OP_RECVMSG:
52de1fe1
JA
4977 if (req->flags & REQ_F_BUFFER_SELECTED)
4978 kfree(req->sr_msg.kbuf);
4979 /* fallthrough */
4980 case IORING_OP_SENDMSG:
99bc4c38
PB
4981 if (io->msg.iov != io->msg.fast_iov)
4982 kfree(io->msg.iov);
4983 break;
bcda7baa
JA
4984 case IORING_OP_RECV:
4985 if (req->flags & REQ_F_BUFFER_SELECTED)
4986 kfree(req->sr_msg.kbuf);
4987 break;
8fef80bf
PB
4988 case IORING_OP_OPENAT:
4989 case IORING_OP_OPENAT2:
4990 case IORING_OP_STATX:
4991 putname(req->open.filename);
4992 break;
7d67af2c
PB
4993 case IORING_OP_SPLICE:
4994 io_put_file(req, req->splice.file_in,
4995 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
4996 break;
99bc4c38
PB
4997 }
4998
4999 req->flags &= ~REQ_F_NEED_CLEANUP;
5000}
5001
3529d8c2 5002static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
014db007 5003 bool force_nonblock)
2b188cc1 5004{
a197f664 5005 struct io_ring_ctx *ctx = req->ctx;
d625c6ee 5006 int ret;
2b188cc1 5007
d625c6ee 5008 switch (req->opcode) {
2b188cc1 5009 case IORING_OP_NOP:
78e19bbe 5010 ret = io_nop(req);
2b188cc1
JA
5011 break;
5012 case IORING_OP_READV:
edafccee 5013 case IORING_OP_READ_FIXED:
3a6820f2 5014 case IORING_OP_READ:
3529d8c2
JA
5015 if (sqe) {
5016 ret = io_read_prep(req, sqe, force_nonblock);
5017 if (ret < 0)
5018 break;
5019 }
014db007 5020 ret = io_read(req, force_nonblock);
edafccee 5021 break;
3529d8c2 5022 case IORING_OP_WRITEV:
edafccee 5023 case IORING_OP_WRITE_FIXED:
3a6820f2 5024 case IORING_OP_WRITE:
3529d8c2
JA
5025 if (sqe) {
5026 ret = io_write_prep(req, sqe, force_nonblock);
5027 if (ret < 0)
5028 break;
5029 }
014db007 5030 ret = io_write(req, force_nonblock);
2b188cc1 5031 break;
c992fe29 5032 case IORING_OP_FSYNC:
3529d8c2
JA
5033 if (sqe) {
5034 ret = io_prep_fsync(req, sqe);
5035 if (ret < 0)
5036 break;
5037 }
014db007 5038 ret = io_fsync(req, force_nonblock);
c992fe29 5039 break;
221c5eb2 5040 case IORING_OP_POLL_ADD:
3529d8c2
JA
5041 if (sqe) {
5042 ret = io_poll_add_prep(req, sqe);
5043 if (ret)
5044 break;
5045 }
014db007 5046 ret = io_poll_add(req);
221c5eb2
JA
5047 break;
5048 case IORING_OP_POLL_REMOVE:
3529d8c2
JA
5049 if (sqe) {
5050 ret = io_poll_remove_prep(req, sqe);
5051 if (ret < 0)
5052 break;
5053 }
fc4df999 5054 ret = io_poll_remove(req);
221c5eb2 5055 break;
5d17b4a4 5056 case IORING_OP_SYNC_FILE_RANGE:
3529d8c2
JA
5057 if (sqe) {
5058 ret = io_prep_sfr(req, sqe);
5059 if (ret < 0)
5060 break;
5061 }
014db007 5062 ret = io_sync_file_range(req, force_nonblock);
5d17b4a4 5063 break;
0fa03c62 5064 case IORING_OP_SENDMSG:
fddaface 5065 case IORING_OP_SEND:
3529d8c2
JA
5066 if (sqe) {
5067 ret = io_sendmsg_prep(req, sqe);
5068 if (ret < 0)
5069 break;
5070 }
fddaface 5071 if (req->opcode == IORING_OP_SENDMSG)
014db007 5072 ret = io_sendmsg(req, force_nonblock);
fddaface 5073 else
014db007 5074 ret = io_send(req, force_nonblock);
0fa03c62 5075 break;
aa1fa28f 5076 case IORING_OP_RECVMSG:
fddaface 5077 case IORING_OP_RECV:
3529d8c2
JA
5078 if (sqe) {
5079 ret = io_recvmsg_prep(req, sqe);
5080 if (ret)
5081 break;
5082 }
fddaface 5083 if (req->opcode == IORING_OP_RECVMSG)
014db007 5084 ret = io_recvmsg(req, force_nonblock);
fddaface 5085 else
014db007 5086 ret = io_recv(req, force_nonblock);
aa1fa28f 5087 break;
5262f567 5088 case IORING_OP_TIMEOUT:
3529d8c2
JA
5089 if (sqe) {
5090 ret = io_timeout_prep(req, sqe, false);
5091 if (ret)
5092 break;
5093 }
fc4df999 5094 ret = io_timeout(req);
5262f567 5095 break;
11365043 5096 case IORING_OP_TIMEOUT_REMOVE:
3529d8c2
JA
5097 if (sqe) {
5098 ret = io_timeout_remove_prep(req, sqe);
5099 if (ret)
5100 break;
5101 }
fc4df999 5102 ret = io_timeout_remove(req);
11365043 5103 break;
17f2fe35 5104 case IORING_OP_ACCEPT:
3529d8c2
JA
5105 if (sqe) {
5106 ret = io_accept_prep(req, sqe);
5107 if (ret)
5108 break;
5109 }
014db007 5110 ret = io_accept(req, force_nonblock);
17f2fe35 5111 break;
f8e85cf2 5112 case IORING_OP_CONNECT:
3529d8c2
JA
5113 if (sqe) {
5114 ret = io_connect_prep(req, sqe);
5115 if (ret)
5116 break;
5117 }
014db007 5118 ret = io_connect(req, force_nonblock);
f8e85cf2 5119 break;
62755e35 5120 case IORING_OP_ASYNC_CANCEL:
3529d8c2
JA
5121 if (sqe) {
5122 ret = io_async_cancel_prep(req, sqe);
5123 if (ret)
5124 break;
5125 }
014db007 5126 ret = io_async_cancel(req);
62755e35 5127 break;
d63d1b5e
JA
5128 case IORING_OP_FALLOCATE:
5129 if (sqe) {
5130 ret = io_fallocate_prep(req, sqe);
5131 if (ret)
5132 break;
5133 }
014db007 5134 ret = io_fallocate(req, force_nonblock);
d63d1b5e 5135 break;
15b71abe
JA
5136 case IORING_OP_OPENAT:
5137 if (sqe) {
5138 ret = io_openat_prep(req, sqe);
5139 if (ret)
5140 break;
5141 }
014db007 5142 ret = io_openat(req, force_nonblock);
15b71abe 5143 break;
b5dba59e
JA
5144 case IORING_OP_CLOSE:
5145 if (sqe) {
5146 ret = io_close_prep(req, sqe);
5147 if (ret)
5148 break;
5149 }
014db007 5150 ret = io_close(req, force_nonblock);
b5dba59e 5151 break;
05f3fb3c
JA
5152 case IORING_OP_FILES_UPDATE:
5153 if (sqe) {
5154 ret = io_files_update_prep(req, sqe);
5155 if (ret)
5156 break;
5157 }
5158 ret = io_files_update(req, force_nonblock);
5159 break;
eddc7ef5
JA
5160 case IORING_OP_STATX:
5161 if (sqe) {
5162 ret = io_statx_prep(req, sqe);
5163 if (ret)
5164 break;
5165 }
014db007 5166 ret = io_statx(req, force_nonblock);
eddc7ef5 5167 break;
4840e418
JA
5168 case IORING_OP_FADVISE:
5169 if (sqe) {
5170 ret = io_fadvise_prep(req, sqe);
5171 if (ret)
5172 break;
5173 }
014db007 5174 ret = io_fadvise(req, force_nonblock);
4840e418 5175 break;
c1ca757b
JA
5176 case IORING_OP_MADVISE:
5177 if (sqe) {
5178 ret = io_madvise_prep(req, sqe);
5179 if (ret)
5180 break;
5181 }
014db007 5182 ret = io_madvise(req, force_nonblock);
c1ca757b 5183 break;
cebdb986
JA
5184 case IORING_OP_OPENAT2:
5185 if (sqe) {
5186 ret = io_openat2_prep(req, sqe);
5187 if (ret)
5188 break;
5189 }
014db007 5190 ret = io_openat2(req, force_nonblock);
cebdb986 5191 break;
3e4827b0
JA
5192 case IORING_OP_EPOLL_CTL:
5193 if (sqe) {
5194 ret = io_epoll_ctl_prep(req, sqe);
5195 if (ret)
5196 break;
5197 }
014db007 5198 ret = io_epoll_ctl(req, force_nonblock);
3e4827b0 5199 break;
7d67af2c
PB
5200 case IORING_OP_SPLICE:
5201 if (sqe) {
5202 ret = io_splice_prep(req, sqe);
5203 if (ret < 0)
5204 break;
5205 }
014db007 5206 ret = io_splice(req, force_nonblock);
7d67af2c 5207 break;
ddf0322d
JA
5208 case IORING_OP_PROVIDE_BUFFERS:
5209 if (sqe) {
5210 ret = io_provide_buffers_prep(req, sqe);
5211 if (ret)
5212 break;
5213 }
5214 ret = io_provide_buffers(req, force_nonblock);
5215 break;
067524e9
JA
5216 case IORING_OP_REMOVE_BUFFERS:
5217 if (sqe) {
5218 ret = io_remove_buffers_prep(req, sqe);
5219 if (ret)
5220 break;
5221 }
5222 ret = io_remove_buffers(req, force_nonblock);
5223 break;
2b188cc1
JA
5224 default:
5225 ret = -EINVAL;
5226 break;
5227 }
5228
def596e9
JA
5229 if (ret)
5230 return ret;
5231
5232 if (ctx->flags & IORING_SETUP_IOPOLL) {
11ba820b
JA
5233 const bool in_async = io_wq_current_is_worker();
5234
9e645e11 5235 if (req->result == -EAGAIN)
def596e9
JA
5236 return -EAGAIN;
5237
11ba820b
JA
5238 /* workqueue context doesn't hold uring_lock, grab it now */
5239 if (in_async)
5240 mutex_lock(&ctx->uring_lock);
5241
def596e9 5242 io_iopoll_req_issued(req);
11ba820b
JA
5243
5244 if (in_async)
5245 mutex_unlock(&ctx->uring_lock);
def596e9
JA
5246 }
5247
5248 return 0;
2b188cc1
JA
5249}
5250
561fb04a 5251static void io_wq_submit_work(struct io_wq_work **workptr)
2b188cc1 5252{
561fb04a 5253 struct io_wq_work *work = *workptr;
2b188cc1 5254 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
561fb04a 5255 int ret = 0;
2b188cc1 5256
0c9d5ccd
JA
5257 /* if NO_CANCEL is set, we must still run the work */
5258 if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
5259 IO_WQ_WORK_CANCEL) {
561fb04a 5260 ret = -ECANCELED;
0c9d5ccd 5261 }
31b51510 5262
561fb04a 5263 if (!ret) {
561fb04a 5264 do {
014db007 5265 ret = io_issue_sqe(req, NULL, false);
561fb04a
JA
5266 /*
5267 * We can get EAGAIN for polled IO even though we're
5268 * forcing a sync submission from here, since we can't
5269 * wait for request slots on the block side.
5270 */
5271 if (ret != -EAGAIN)
5272 break;
5273 cond_resched();
5274 } while (1);
5275 }
31b51510 5276
561fb04a 5277 if (ret) {
4e88d6e7 5278 req_set_fail_links(req);
78e19bbe 5279 io_cqring_add_event(req, ret);
817869d2 5280 io_put_req(req);
edafccee 5281 }
2b188cc1 5282
e9fd9396 5283 io_steal_work(req, workptr);
2b188cc1
JA
5284}
5285
15b71abe 5286static int io_req_needs_file(struct io_kiocb *req, int fd)
9e3aa61a 5287{
d3656344 5288 if (!io_op_defs[req->opcode].needs_file)
9e3aa61a 5289 return 0;
0b5faf6b 5290 if ((fd == -1 || fd == AT_FDCWD) && io_op_defs[req->opcode].fd_non_neg)
d3656344
JA
5291 return 0;
5292 return 1;
09bb8394
JA
5293}
5294
65e19f54
JA
5295static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
5296 int index)
5297{
5298 struct fixed_file_table *table;
5299
05f3fb3c
JA
5300 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
5301	return table->files[index & IORING_FILE_TABLE_MASK];
65e19f54
JA
5302}
5303
8da11c19
PB
5304static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
5305 int fd, struct file **out_file, bool fixed)
09bb8394 5306{
a197f664 5307 struct io_ring_ctx *ctx = req->ctx;
8da11c19 5308 struct file *file;
09bb8394 5309
8da11c19 5310 if (fixed) {
05f3fb3c 5311 if (unlikely(!ctx->file_data ||
09bb8394
JA
5312 (unsigned) fd >= ctx->nr_user_files))
5313 return -EBADF;
b7620121 5314 fd = array_index_nospec(fd, ctx->nr_user_files);
8da11c19
PB
5315 file = io_file_from_index(ctx, fd);
5316 if (!file)
08a45173 5317 return -EBADF;
05f3fb3c 5318 percpu_ref_get(&ctx->file_data->refs);
09bb8394 5319 } else {
c826bd7a 5320 trace_io_uring_file_get(ctx, fd);
8da11c19
PB
5321 file = __io_file_get(state, fd);
5322 if (unlikely(!file))
09bb8394
JA
5323 return -EBADF;
5324 }
5325
8da11c19 5326 *out_file = file;
09bb8394
JA
5327 return 0;
5328}
5329
8da11c19
PB
5330static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
5331 const struct io_uring_sqe *sqe)
5332{
5333 unsigned flags;
5334 int fd;
5335 bool fixed;
5336
5337 flags = READ_ONCE(sqe->flags);
5338 fd = READ_ONCE(sqe->fd);
5339
5340 if (!io_req_needs_file(req, fd))
5341 return 0;
5342
5343 fixed = (flags & IOSQE_FIXED_FILE);
5344 if (unlikely(!fixed && req->needs_fixed_file))
5345 return -EBADF;
5346
5347 return io_file_get(state, req, fd, &req->file, fixed);
5348}
5349
a197f664 5350static int io_grab_files(struct io_kiocb *req)
fcb323cc
JA
5351{
5352 int ret = -EBADF;
a197f664 5353 struct io_ring_ctx *ctx = req->ctx;
fcb323cc 5354
f86cd20c
JA
5355 if (req->work.files)
5356 return 0;
b14cca0c 5357 if (!ctx->ring_file)
b5dba59e
JA
5358 return -EBADF;
5359
fcb323cc
JA
5360 rcu_read_lock();
5361 spin_lock_irq(&ctx->inflight_lock);
5362 /*
5363 * We use the f_ops->flush() handler to ensure that we can flush
5364 * out work accessing these files if the fd is closed. Check if
5365 * the fd has changed since we started down this path, and disallow
5366 * this operation if it has.
5367 */
b14cca0c 5368 if (fcheck(ctx->ring_fd) == ctx->ring_file) {
fcb323cc
JA
5369 list_add(&req->inflight_entry, &ctx->inflight_list);
5370 req->flags |= REQ_F_INFLIGHT;
5371 req->work.files = current->files;
5372 ret = 0;
5373 }
5374 spin_unlock_irq(&ctx->inflight_lock);
5375 rcu_read_unlock();
5376
5377 return ret;
5378}
5379
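/*
 * Expiry handler for IORING_OP_LINK_TIMEOUT: if the request it is
 * linked to is still in flight, take a reference and cancel it; the
 * timeout request itself then completes with -ETIME (or the result of
 * the cancellation attempt).
 */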
2665abfd 5380static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2b188cc1 5381{
ad8a48ac
JA
5382 struct io_timeout_data *data = container_of(timer,
5383 struct io_timeout_data, timer);
5384 struct io_kiocb *req = data->req;
2665abfd
JA
5385 struct io_ring_ctx *ctx = req->ctx;
5386 struct io_kiocb *prev = NULL;
5387 unsigned long flags;
2665abfd
JA
5388
5389 spin_lock_irqsave(&ctx->completion_lock, flags);
5390
5391 /*
5392 * We don't expect the list to be empty, that will only happen if we
5393 * race with the completion of the linked work.
5394 */
4493233e
PB
5395 if (!list_empty(&req->link_list)) {
5396 prev = list_entry(req->link_list.prev, struct io_kiocb,
5397 link_list);
5d960724 5398 if (refcount_inc_not_zero(&prev->refs)) {
4493233e 5399 list_del_init(&req->link_list);
5d960724
JA
5400 prev->flags &= ~REQ_F_LINK_TIMEOUT;
5401 } else
76a46e06 5402 prev = NULL;
2665abfd
JA
5403 }
5404
5405 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5406
5407 if (prev) {
4e88d6e7 5408 req_set_fail_links(prev);
014db007 5409 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
76a46e06 5410 io_put_req(prev);
47f46768
JA
5411 } else {
5412 io_cqring_add_event(req, -ETIME);
5413 io_put_req(req);
2665abfd 5414 }
2665abfd
JA
5415 return HRTIMER_NORESTART;
5416}
5417
ad8a48ac 5418static void io_queue_linked_timeout(struct io_kiocb *req)
2665abfd 5419{
76a46e06 5420 struct io_ring_ctx *ctx = req->ctx;
2665abfd 5421
76a46e06
JA
5422 /*
5423 * If the list is now empty, then our linked request finished before
5424	 * we got a chance to set up the timer.
5425 */
5426 spin_lock_irq(&ctx->completion_lock);
4493233e 5427 if (!list_empty(&req->link_list)) {
2d28390a 5428 struct io_timeout_data *data = &req->io->timeout;
94ae5e77 5429
ad8a48ac
JA
5430 data->timer.function = io_link_timeout_fn;
5431 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
5432 data->mode);
2665abfd 5433 }
76a46e06 5434 spin_unlock_irq(&ctx->completion_lock);
2665abfd 5435
2665abfd 5436 /* drop submission reference */
76a46e06
JA
5437 io_put_req(req);
5438}
2665abfd 5439
ad8a48ac 5440static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
2665abfd
JA
5441{
5442 struct io_kiocb *nxt;
5443
5444 if (!(req->flags & REQ_F_LINK))
5445 return NULL;
d7718a9d
JA
5446 /* for polled retry, if flag is set, we already went through here */
5447 if (req->flags & REQ_F_POLLED)
5448 return NULL;
2665abfd 5449
4493233e
PB
5450 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
5451 link_list);
d625c6ee 5452 if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
76a46e06 5453 return NULL;
2665abfd 5454
76a46e06 5455 req->flags |= REQ_F_LINK_TIMEOUT;
76a46e06 5456 return nxt;
2665abfd
JA
5457}
5458
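/*
 * Issue a request inline with force_nonblock set. If it would block and
 * the file does not support non-blocking attempts, either arm the fast
 * poll handler or grab the needed file references and punt the request
 * to io-wq for blocking execution.
 */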
3529d8c2 5459static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2b188cc1 5460{
4a0a7a18 5461 struct io_kiocb *linked_timeout;
4bc4494e 5462 struct io_kiocb *nxt;
193155c8 5463 const struct cred *old_creds = NULL;
e0c5c576 5464 int ret;
2b188cc1 5465
4a0a7a18
JA
5466again:
5467 linked_timeout = io_prep_linked_timeout(req);
5468
193155c8
JA
5469 if (req->work.creds && req->work.creds != current_cred()) {
5470 if (old_creds)
5471 revert_creds(old_creds);
5472 if (old_creds == req->work.creds)
5473 old_creds = NULL; /* restored original creds */
5474 else
5475 old_creds = override_creds(req->work.creds);
5476 }
5477
014db007 5478 ret = io_issue_sqe(req, sqe, true);
491381ce
JA
5479
5480 /*
5481 * We async punt it if the file wasn't marked NOWAIT, or if the file
5482 * doesn't support non-blocking read/write attempts
5483 */
5484 if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
5485 (req->flags & REQ_F_MUST_PUNT))) {
d7718a9d
JA
5486 if (io_arm_poll_handler(req)) {
5487 if (linked_timeout)
5488 io_queue_linked_timeout(linked_timeout);
4bc4494e 5489 goto exit;
d7718a9d 5490 }
86a761f8 5491punt:
f86cd20c 5492 if (io_op_defs[req->opcode].file_table) {
bbad27b2
PB
5493 ret = io_grab_files(req);
5494 if (ret)
5495 goto err;
2b188cc1 5496 }
bbad27b2
PB
5497
5498 /*
5499 * Queued up for async execution, worker will release
5500 * submit reference when the iocb is actually submitted.
5501 */
5502 io_queue_async_work(req);
4bc4494e 5503 goto exit;
2b188cc1 5504 }
e65ef56d 5505
fcb323cc 5506err:
4bc4494e 5507 nxt = NULL;
76a46e06 5508 /* drop submission reference */
2a44f467 5509 io_put_req_find_next(req, &nxt);
e65ef56d 5510
f9bd67f6 5511 if (linked_timeout) {
76a46e06 5512 if (!ret)
f9bd67f6 5513 io_queue_linked_timeout(linked_timeout);
76a46e06 5514 else
f9bd67f6 5515 io_put_req(linked_timeout);
76a46e06
JA
5516 }
5517
e65ef56d 5518 /* and drop final reference, if we failed */
9e645e11 5519 if (ret) {
78e19bbe 5520 io_cqring_add_event(req, ret);
4e88d6e7 5521 req_set_fail_links(req);
e65ef56d 5522 io_put_req(req);
9e645e11 5523 }
4a0a7a18
JA
5524 if (nxt) {
5525 req = nxt;
86a761f8
PB
5526
5527 if (req->flags & REQ_F_FORCE_ASYNC)
5528 goto punt;
4a0a7a18
JA
5529 goto again;
5530 }
4bc4494e 5531exit:
193155c8
JA
5532 if (old_creds)
5533 revert_creds(old_creds);
2b188cc1
JA
5534}
5535
3529d8c2 5536static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4fe2c963
JL
5537{
5538 int ret;
5539
3529d8c2 5540 ret = io_req_defer(req, sqe);
4fe2c963
JL
5541 if (ret) {
5542 if (ret != -EIOCBQUEUED) {
1118591a 5543fail_req:
78e19bbe 5544 io_cqring_add_event(req, ret);
4e88d6e7 5545 req_set_fail_links(req);
78e19bbe 5546 io_double_put_req(req);
4fe2c963 5547 }
2550878f 5548 } else if (req->flags & REQ_F_FORCE_ASYNC) {
1118591a
PB
5549 ret = io_req_defer_prep(req, sqe);
5550 if (unlikely(ret < 0))
5551 goto fail_req;
ce35a47a
JA
5552 /*
5553 * Never try inline submit if IOSQE_ASYNC is set, go straight
5554 * to async execution.
5555 */
5556 req->work.flags |= IO_WQ_WORK_CONCURRENT;
5557 io_queue_async_work(req);
5558 } else {
3529d8c2 5559 __io_queue_sqe(req, sqe);
ce35a47a 5560 }
4fe2c963
JL
5561}
5562
1b4a51b6 5563static inline void io_queue_link_head(struct io_kiocb *req)
4fe2c963 5564{
94ae5e77 5565 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
1b4a51b6
PB
5566 io_cqring_add_event(req, -ECANCELED);
5567 io_double_put_req(req);
5568 } else
3529d8c2 5569 io_queue_sqe(req, NULL);
4fe2c963
JL
5570}
5571
4e88d6e7 5572#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
bcda7baa
JA
5573 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
5574 IOSQE_BUFFER_SELECT)
9e645e11 5575
3529d8c2
JA
5576static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5577 struct io_submit_state *state, struct io_kiocb **link)
9e645e11 5578{
a197f664 5579 struct io_ring_ctx *ctx = req->ctx;
32fe525b 5580 unsigned int sqe_flags;
75c6a039 5581 int ret, id;
9e645e11 5582
32fe525b 5583 sqe_flags = READ_ONCE(sqe->flags);
9e645e11
JA
5584
5585 /* enforce forwards compatibility on users */
32fe525b 5586 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
9e645e11 5587 ret = -EINVAL;
196be95c 5588 goto err_req;
9e645e11
JA
5589 }
5590
bcda7baa
JA
5591 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
5592 !io_op_defs[req->opcode].buffer_select) {
5593 ret = -EOPNOTSUPP;
5594 goto err_req;
5595 }
5596
75c6a039
JA
5597 id = READ_ONCE(sqe->personality);
5598 if (id) {
193155c8
JA
5599 req->work.creds = idr_find(&ctx->personality_idr, id);
5600 if (unlikely(!req->work.creds)) {
75c6a039
JA
5601 ret = -EINVAL;
5602 goto err_req;
5603 }
193155c8 5604 get_cred(req->work.creds);
75c6a039
JA
5605 }
5606
6b47ee6e 5607 /* same numerical values as the corresponding REQ_F_*, safe to copy */
8da11c19 5608 req->flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK |
bcda7baa
JA
5609 IOSQE_ASYNC | IOSQE_FIXED_FILE |
5610 IOSQE_BUFFER_SELECT);
9e645e11 5611
3529d8c2 5612 ret = io_req_set_file(state, req, sqe);
9e645e11
JA
5613 if (unlikely(ret)) {
5614err_req:
78e19bbe
JA
5615 io_cqring_add_event(req, ret);
5616 io_double_put_req(req);
2e6e1fde 5617 return false;
9e645e11
JA
5618 }
5619
9e645e11
JA
5620 /*
5621 * If we already have a head request, queue this one for async
5622 * submittal once the head completes. If we don't have a head but
5623 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
5624 * submitted sync once the chain is complete. If none of those
5625 * conditions are true (normal request), then just queue it.
5626 */
5627 if (*link) {
9d76377f 5628 struct io_kiocb *head = *link;
4e88d6e7 5629
8cdf2193
PB
5630 /*
5631 * Because a link is executed sequentially, draining both sides
5632 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
5633 * requests in the link. So, it drains the head and the
5634 * request following the link. The last one is done via the
5635 * drain_next flag to persist the effect across calls.
5636 */
711be031
PB
5637 if (sqe_flags & IOSQE_IO_DRAIN) {
5638 head->flags |= REQ_F_IO_DRAIN;
5639 ctx->drain_next = 1;
5640 }
b7bb4f7d 5641 if (io_alloc_async_ctx(req)) {
9e645e11
JA
5642 ret = -EAGAIN;
5643 goto err_req;
5644 }
5645
3529d8c2 5646 ret = io_req_defer_prep(req, sqe);
2d28390a 5647 if (ret) {
4e88d6e7 5648 /* fail even hard links since we don't submit */
9d76377f 5649 head->flags |= REQ_F_FAIL_LINK;
f67676d1 5650 goto err_req;
2d28390a 5651 }
9d76377f
PB
5652 trace_io_uring_link(ctx, req, head);
5653 list_add_tail(&req->link_list, &head->link_list);
32fe525b
PB
5654
5655 /* last request of a link, enqueue the link */
5656 if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) {
5657 io_queue_link_head(head);
5658 *link = NULL;
5659 }
9e645e11 5660 } else {
711be031
PB
5661 if (unlikely(ctx->drain_next)) {
5662 req->flags |= REQ_F_IO_DRAIN;
5663 req->ctx->drain_next = 0;
5664 }
5665 if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
5666 req->flags |= REQ_F_LINK;
711be031
PB
5667 INIT_LIST_HEAD(&req->link_list);
5668 ret = io_req_defer_prep(req, sqe);
5669 if (ret)
5670 req->flags |= REQ_F_FAIL_LINK;
5671 *link = req;
5672 } else {
5673 io_queue_sqe(req, sqe);
5674 }
9e645e11 5675 }
2e6e1fde
PB
5676
5677 return true;
9e645e11
JA
5678}
5679
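/*
 * Userspace-side sketch (not kernel code): how an application might build
 * the link chains handled above. A hedged illustration assuming SQEs are
 * obtained from the mmap()ed SQ array; the fill_sqe() helper and its
 * arguments are hypothetical, only the IORING_OP_* and IOSQE_* values are
 * real.
 *
 *	struct io_uring_sqe *sqe;
 *
 *	sqe = fill_sqe(IORING_OP_READV, ...);	// first request in the chain
 *	sqe->flags |= IOSQE_IO_LINK;		// link to the next SQE
 *	sqe = fill_sqe(IORING_OP_WRITEV, ...);	// runs only after the read
 *	// the last SQE in the chain carries no IOSQE_IO_LINK flag
 */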
9a56a232
JA
5680/*
5681 * Batched submission is done, ensure local IO is flushed out.
5682 */
5683static void io_submit_state_end(struct io_submit_state *state)
5684{
5685 blk_finish_plug(&state->plug);
3d6770fb 5686 io_file_put(state);
2579f913 5687 if (state->free_reqs)
6c8a3134 5688 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
9a56a232
JA
5689}
5690
5691/*
5692 * Start submission side cache.
5693 */
5694static void io_submit_state_start(struct io_submit_state *state,
22efde59 5695 unsigned int max_ios)
9a56a232
JA
5696{
5697 blk_start_plug(&state->plug);
2579f913 5698 state->free_reqs = 0;
9a56a232
JA
5699 state->file = NULL;
5700 state->ios_left = max_ios;
5701}
5702
2b188cc1
JA
5703static void io_commit_sqring(struct io_ring_ctx *ctx)
5704{
75b28aff 5705 struct io_rings *rings = ctx->rings;
2b188cc1 5706
caf582c6
PB
5707 /*
5708 * Ensure any loads from the SQEs are done at this point,
5709 * since once we write the new head, the application could
5710 * write new data to them.
5711 */
5712 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
5713}
5714
2b188cc1 5715/*
3529d8c2 5716 * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
2b188cc1
JA
5717 * that is mapped by userspace. This means that care needs to be taken to
5718 * ensure that reads are stable, as we cannot rely on userspace always
5719 * being a good citizen. If members of the sqe are validated and then later
5720 * used, it's important that those reads are done through READ_ONCE() to
5721 * prevent a re-load down the line.
5722 */
3529d8c2
JA
5723static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
5724 const struct io_uring_sqe **sqe_ptr)
2b188cc1 5725{
75b28aff 5726 u32 *sq_array = ctx->sq_array;
2b188cc1
JA
5727 unsigned head;
5728
5729 /*
5730 * The cached sq head (or cq tail) serves two purposes:
5731 *
5732 * 1) allows us to batch the cost of updating the user visible
5733 * head updates.
5734 * 2) allows the kernel side to track the head on its own, even
5735 * though the application is the one updating it.
5736 */
ee7d46d9 5737 head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
9835d6fa 5738 if (likely(head < ctx->sq_entries)) {
cf6fd4bd
PB
5739 /*
5740 * Every IO needs to record the previous position; when LINK and
5741 * DRAIN are combined, it is used to mark the position of the
5742 * first IO in the link list.
5743 */
5744 req->sequence = ctx->cached_sq_head;
3529d8c2
JA
5745 *sqe_ptr = &ctx->sq_sqes[head];
5746 req->opcode = READ_ONCE((*sqe_ptr)->opcode);
5747 req->user_data = READ_ONCE((*sqe_ptr)->user_data);
2b188cc1
JA
5748 ctx->cached_sq_head++;
5749 return true;
5750 }
5751
5752 /* drop invalid entries */
5753 ctx->cached_sq_head++;
498ccd9e 5754 ctx->cached_sq_dropped++;
ee7d46d9 5755 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
2b188cc1
JA
5756 return false;
5757}
5758
fb5ccc98 5759static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
ae9428ca
PB
5760 struct file *ring_file, int ring_fd,
5761 struct mm_struct **mm, bool async)
6c271ce2
JA
5762{
5763 struct io_submit_state state, *statep = NULL;
9e645e11 5764 struct io_kiocb *link = NULL;
9e645e11 5765 int i, submitted = 0;
95a1b3ff 5766 bool mm_fault = false;
6c271ce2 5767
c4a2ed72 5768 /* if we have a backlog and couldn't flush it all, return BUSY */
ad3eb2c8
JA
5769 if (test_bit(0, &ctx->sq_check_overflow)) {
5770 if (!list_empty(&ctx->cq_overflow_list) &&
5771 !io_cqring_overflow_flush(ctx, false))
5772 return -EBUSY;
5773 }
6c271ce2 5774
ee7d46d9
PB
5775 /* make sure SQ entry isn't read before tail */
5776 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
9ef4f124 5777
2b85edfc
PB
5778 if (!percpu_ref_tryget_many(&ctx->refs, nr))
5779 return -EAGAIN;
6c271ce2
JA
5780
5781 if (nr > IO_PLUG_THRESHOLD) {
22efde59 5782 io_submit_state_start(&state, nr);
6c271ce2
JA
5783 statep = &state;
5784 }
5785
b14cca0c
PB
5786 ctx->ring_fd = ring_fd;
5787 ctx->ring_file = ring_file;
5788
6c271ce2 5789 for (i = 0; i < nr; i++) {
3529d8c2 5790 const struct io_uring_sqe *sqe;
196be95c 5791 struct io_kiocb *req;
1cb1edb2 5792 int err;
fb5ccc98 5793
196be95c
PB
5794 req = io_get_req(ctx, statep);
5795 if (unlikely(!req)) {
5796 if (!submitted)
5797 submitted = -EAGAIN;
fb5ccc98 5798 break;
196be95c 5799 }
3529d8c2 5800 if (!io_get_sqring(ctx, req, &sqe)) {
2b85edfc 5801 __io_req_do_free(req);
196be95c
PB
5802 break;
5803 }
fb5ccc98 5804
d3656344
JA
5805 /* will complete beyond this point, count as submitted */
5806 submitted++;
5807
5808 if (unlikely(req->opcode >= IORING_OP_LAST)) {
1cb1edb2
PB
5809 err = -EINVAL;
5810fail_req:
5811 io_cqring_add_event(req, err);
d3656344 5812 io_double_put_req(req);
196be95c
PB
5813 break;
5814 }
fb5ccc98 5815
d3656344 5816 if (io_op_defs[req->opcode].needs_mm && !*mm) {
95a1b3ff 5817 mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
1cb1edb2
PB
5818 if (unlikely(mm_fault)) {
5819 err = -EFAULT;
5820 goto fail_req;
95a1b3ff 5821 }
1cb1edb2
PB
5822 use_mm(ctx->sqo_mm);
5823 *mm = ctx->sqo_mm;
9e645e11 5824 }
9e645e11 5825
cf6fd4bd 5826 req->needs_fixed_file = async;
354420f7
JA
5827 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
5828 true, async);
3529d8c2 5829 if (!io_submit_sqe(req, sqe, statep, &link))
2e6e1fde 5830 break;
6c271ce2
JA
5831 }
5832
9466f437
PB
5833 if (unlikely(submitted != nr)) {
5834 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
5835
5836 percpu_ref_put_many(&ctx->refs, nr - ref_used);
5837 }
9e645e11 5838 if (link)
1b4a51b6 5839 io_queue_link_head(link);
6c271ce2
JA
5840 if (statep)
5841 io_submit_state_end(&state);
5842
ae9428ca
PB
5843 /* Commit SQ ring head once we've consumed and submitted all SQEs */
5844 io_commit_sqring(ctx);
5845
6c271ce2
JA
5846 return submitted;
5847}
5848
5849static int io_sq_thread(void *data)
5850{
6c271ce2
JA
5851 struct io_ring_ctx *ctx = data;
5852 struct mm_struct *cur_mm = NULL;
181e448d 5853 const struct cred *old_cred;
6c271ce2
JA
5854 mm_segment_t old_fs;
5855 DEFINE_WAIT(wait);
6c271ce2 5856 unsigned long timeout;
bdcd3eab 5857 int ret = 0;
6c271ce2 5858
206aefde 5859 complete(&ctx->completions[1]);
a4c0b3de 5860
6c271ce2
JA
5861 old_fs = get_fs();
5862 set_fs(USER_DS);
181e448d 5863 old_cred = override_creds(ctx->creds);
6c271ce2 5864
bdcd3eab 5865 timeout = jiffies + ctx->sq_thread_idle;
2bbcd6d3 5866 while (!kthread_should_park()) {
fb5ccc98 5867 unsigned int to_submit;
6c271ce2 5868
bdcd3eab 5869 if (!list_empty(&ctx->poll_list)) {
6c271ce2
JA
5870 unsigned nr_events = 0;
5871
bdcd3eab
XW
5872 mutex_lock(&ctx->uring_lock);
5873 if (!list_empty(&ctx->poll_list))
5874 io_iopoll_getevents(ctx, &nr_events, 0);
5875 else
6c271ce2 5876 timeout = jiffies + ctx->sq_thread_idle;
bdcd3eab 5877 mutex_unlock(&ctx->uring_lock);
6c271ce2
JA
5878 }
5879
fb5ccc98 5880 to_submit = io_sqring_entries(ctx);
c1edbf5f
JA
5881
5882 /*
5883 * If submit got -EBUSY, flag us as needing the application
5884 * to enter the kernel to reap and flush events.
5885 */
5886 if (!to_submit || ret == -EBUSY) {
7143b5ac
SG
5887 /*
5888 * Drop cur_mm before scheduling, we can't hold it for
5889 * long periods (or over schedule()). Do this before
5890 * adding ourselves to the waitqueue, as the unuse/drop
5891 * may sleep.
5892 */
5893 if (cur_mm) {
5894 unuse_mm(cur_mm);
5895 mmput(cur_mm);
5896 cur_mm = NULL;
5897 }
5898
6c271ce2
JA
5899 /*
5900 * We're polling. If we're within the defined idle
5901 * period, then let us spin without work before going
c1edbf5f
JA
5902 * to sleep. The exception is if we got -EBUSY doing
5903 * more IO; in that case, wait for the application to
5904 * reap events and wake us up.
6c271ce2 5905 */
bdcd3eab 5906 if (!list_empty(&ctx->poll_list) ||
df069d80
JA
5907 (!time_after(jiffies, timeout) && ret != -EBUSY &&
5908 !percpu_ref_is_dying(&ctx->refs))) {
b41e9852
JA
5909 if (current->task_works)
5910 task_work_run();
9831a90c 5911 cond_resched();
6c271ce2
JA
5912 continue;
5913 }
5914
6c271ce2
JA
5915 prepare_to_wait(&ctx->sqo_wait, &wait,
5916 TASK_INTERRUPTIBLE);
5917
bdcd3eab
XW
5918 /*
5919 * While doing polled IO, before going to sleep we need to
5920 * check if new reqs were added to poll_list; reqs may have
5921 * been punted to an io worker and will only be added to
5922 * poll_list later, so check poll_list again.
5924 */
5925 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
5926 !list_empty_careful(&ctx->poll_list)) {
5927 finish_wait(&ctx->sqo_wait, &wait);
5928 continue;
5929 }
5930
6c271ce2 5931 /* Tell userspace we may need a wakeup call */
75b28aff 5932 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
0d7bae69
SB
5933 /* make sure to read SQ tail after writing flags */
5934 smp_mb();
6c271ce2 5935
fb5ccc98 5936 to_submit = io_sqring_entries(ctx);
c1edbf5f 5937 if (!to_submit || ret == -EBUSY) {
2bbcd6d3 5938 if (kthread_should_park()) {
6c271ce2
JA
5939 finish_wait(&ctx->sqo_wait, &wait);
5940 break;
5941 }
b41e9852
JA
5942 if (current->task_works) {
5943 task_work_run();
5944 continue;
5945 }
6c271ce2
JA
5946 if (signal_pending(current))
5947 flush_signals(current);
5948 schedule();
5949 finish_wait(&ctx->sqo_wait, &wait);
5950
75b28aff 5951 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6c271ce2
JA
5952 continue;
5953 }
5954 finish_wait(&ctx->sqo_wait, &wait);
5955
75b28aff 5956 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6c271ce2
JA
5957 }
5958
8a4955ff 5959 mutex_lock(&ctx->uring_lock);
1d7bb1d5 5960 ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
8a4955ff 5961 mutex_unlock(&ctx->uring_lock);
bdcd3eab 5962 timeout = jiffies + ctx->sq_thread_idle;
6c271ce2
JA
5963 }
5964
b41e9852
JA
5965 if (current->task_works)
5966 task_work_run();
5967
6c271ce2
JA
5968 set_fs(old_fs);
5969 if (cur_mm) {
5970 unuse_mm(cur_mm);
5971 mmput(cur_mm);
5972 }
181e448d 5973 revert_creds(old_cred);
06058632 5974
2bbcd6d3 5975 kthread_parkme();
06058632 5976
6c271ce2
JA
5977 return 0;
5978}
5979
bda52162
JA
5980struct io_wait_queue {
5981 struct wait_queue_entry wq;
5982 struct io_ring_ctx *ctx;
5983 unsigned to_wait;
5984 unsigned nr_timeouts;
5985};
5986
1d7bb1d5 5987static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
bda52162
JA
5988{
5989 struct io_ring_ctx *ctx = iowq->ctx;
5990
5991 /*
d195a66e 5992 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
5993 * started waiting. For timeouts, we always want to return to userspace,
5994 * regardless of event count.
5995 */
1d7bb1d5 5996 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
bda52162
JA
5997 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
5998}
5999
6000static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6001 int wake_flags, void *key)
6002{
6003 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6004 wq);
6005
1d7bb1d5
JA
6006 /* use noflush == true, as we can't safely rely on locking context */
6007 if (!io_should_wake(iowq, true))
bda52162
JA
6008 return -1;
6009
6010 return autoremove_wake_function(curr, mode, wake_flags, key);
6011}
6012
2b188cc1
JA
6013/*
6014 * Wait until events become available, if we don't already have some. The
6015 * application must reap them itself, as they reside on the shared cq ring.
6016 */
6017static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
6018 const sigset_t __user *sig, size_t sigsz)
6019{
bda52162
JA
6020 struct io_wait_queue iowq = {
6021 .wq = {
6022 .private = current,
6023 .func = io_wake_function,
6024 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6025 },
6026 .ctx = ctx,
6027 .to_wait = min_events,
6028 };
75b28aff 6029 struct io_rings *rings = ctx->rings;
e9ffa5c2 6030 int ret = 0;
2b188cc1 6031
b41e9852
JA
6032 do {
6033 if (io_cqring_events(ctx, false) >= min_events)
6034 return 0;
6035 if (!current->task_works)
6036 break;
6037 task_work_run();
6038 } while (1);
2b188cc1
JA
6039
6040 if (sig) {
9e75ad5d
AB
6041#ifdef CONFIG_COMPAT
6042 if (in_compat_syscall())
6043 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 6044 sigsz);
9e75ad5d
AB
6045 else
6046#endif
b772434b 6047 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 6048
2b188cc1
JA
6049 if (ret)
6050 return ret;
6051 }
6052
bda52162 6053 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
c826bd7a 6054 trace_io_uring_cqring_wait(ctx, min_events);
bda52162
JA
6055 do {
6056 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
6057 TASK_INTERRUPTIBLE);
b41e9852
JA
6058 if (current->task_works)
6059 task_work_run();
1d7bb1d5 6060 if (io_should_wake(&iowq, false))
bda52162
JA
6061 break;
6062 schedule();
6063 if (signal_pending(current)) {
e9ffa5c2 6064 ret = -EINTR;
bda52162
JA
6065 break;
6066 }
6067 } while (1);
6068 finish_wait(&ctx->wait, &iowq.wq);
6069
e9ffa5c2 6070 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 6071
75b28aff 6072 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
6073}
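/*
 * Userspace-side sketch: one way an application ends up in the wait path
 * above, shown with liburing (the library referenced at the top of this
 * file). A hedged example; handle_completion() is a hypothetical helper.
 *
 *	struct io_uring_cqe *cqe;
 *
 *	if (io_uring_wait_cqe(&ring, &cqe) == 0) {	// may block in io_cqring_wait()
 *		handle_completion(cqe->user_data, cqe->res);
 *		io_uring_cqe_seen(&ring, cqe);		// advance the CQ head
 *	}
 */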
6074
6b06314c
JA
6075static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
6076{
6077#if defined(CONFIG_UNIX)
6078 if (ctx->ring_sock) {
6079 struct sock *sock = ctx->ring_sock->sk;
6080 struct sk_buff *skb;
6081
6082 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
6083 kfree_skb(skb);
6084 }
6085#else
6086 int i;
6087
65e19f54
JA
6088 for (i = 0; i < ctx->nr_user_files; i++) {
6089 struct file *file;
6090
6091 file = io_file_from_index(ctx, i);
6092 if (file)
6093 fput(file);
6094 }
6b06314c
JA
6095#endif
6096}
6097
05f3fb3c
JA
6098static void io_file_ref_kill(struct percpu_ref *ref)
6099{
6100 struct fixed_file_data *data;
6101
6102 data = container_of(ref, struct fixed_file_data, refs);
6103 complete(&data->done);
6104}
6105
6b06314c
JA
6106static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
6107{
05f3fb3c 6108 struct fixed_file_data *data = ctx->file_data;
65e19f54
JA
6109 unsigned nr_tables, i;
6110
05f3fb3c 6111 if (!data)
6b06314c
JA
6112 return -ENXIO;
6113
05f3fb3c 6114 percpu_ref_kill_and_confirm(&data->refs, io_file_ref_kill);
e46a7950 6115 flush_work(&data->ref_work);
2faf852d
JA
6116 wait_for_completion(&data->done);
6117 io_ring_file_ref_flush(data);
05f3fb3c
JA
6118 percpu_ref_exit(&data->refs);
6119
6b06314c 6120 __io_sqe_files_unregister(ctx);
65e19f54
JA
6121 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
6122 for (i = 0; i < nr_tables; i++)
05f3fb3c
JA
6123 kfree(data->table[i].files);
6124 kfree(data->table);
6125 kfree(data);
6126 ctx->file_data = NULL;
6b06314c
JA
6127 ctx->nr_user_files = 0;
6128 return 0;
6129}
6130
6c271ce2
JA
6131static void io_sq_thread_stop(struct io_ring_ctx *ctx)
6132{
6133 if (ctx->sqo_thread) {
206aefde 6134 wait_for_completion(&ctx->completions[1]);
2bbcd6d3
RP
6135 /*
6136 * The park is a bit of a work-around, without it we get
6137 * warning spews on shutdown with SQPOLL set and affinity
6138 * set to a single CPU.
6139 */
06058632 6140 kthread_park(ctx->sqo_thread);
6c271ce2
JA
6141 kthread_stop(ctx->sqo_thread);
6142 ctx->sqo_thread = NULL;
6143 }
6144}
6145
6b06314c
JA
6146static void io_finish_async(struct io_ring_ctx *ctx)
6147{
6c271ce2
JA
6148 io_sq_thread_stop(ctx);
6149
561fb04a
JA
6150 if (ctx->io_wq) {
6151 io_wq_destroy(ctx->io_wq);
6152 ctx->io_wq = NULL;
6b06314c
JA
6153 }
6154}
6155
6156#if defined(CONFIG_UNIX)
6b06314c
JA
6157/*
6158 * Ensure the UNIX gc is aware of our file set, so we are certain that
6159 * the io_uring can be safely unregistered on process exit, even if we have
6160 * loops in the file referencing.
6161 */
6162static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
6163{
6164 struct sock *sk = ctx->ring_sock->sk;
6165 struct scm_fp_list *fpl;
6166 struct sk_buff *skb;
08a45173 6167 int i, nr_files;
6b06314c
JA
6168
6169 if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
6170 unsigned long inflight = ctx->user->unix_inflight + nr;
6171
6172 if (inflight > task_rlimit(current, RLIMIT_NOFILE))
6173 return -EMFILE;
6174 }
6175
6176 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
6177 if (!fpl)
6178 return -ENOMEM;
6179
6180 skb = alloc_skb(0, GFP_KERNEL);
6181 if (!skb) {
6182 kfree(fpl);
6183 return -ENOMEM;
6184 }
6185
6186 skb->sk = sk;
6b06314c 6187
08a45173 6188 nr_files = 0;
6b06314c
JA
6189 fpl->user = get_uid(ctx->user);
6190 for (i = 0; i < nr; i++) {
65e19f54
JA
6191 struct file *file = io_file_from_index(ctx, i + offset);
6192
6193 if (!file)
08a45173 6194 continue;
65e19f54 6195 fpl->fp[nr_files] = get_file(file);
08a45173
JA
6196 unix_inflight(fpl->user, fpl->fp[nr_files]);
6197 nr_files++;
6b06314c
JA
6198 }
6199
08a45173
JA
6200 if (nr_files) {
6201 fpl->max = SCM_MAX_FD;
6202 fpl->count = nr_files;
6203 UNIXCB(skb).fp = fpl;
05f3fb3c 6204 skb->destructor = unix_destruct_scm;
08a45173
JA
6205 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
6206 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 6207
08a45173
JA
6208 for (i = 0; i < nr_files; i++)
6209 fput(fpl->fp[i]);
6210 } else {
6211 kfree_skb(skb);
6212 kfree(fpl);
6213 }
6b06314c
JA
6214
6215 return 0;
6216}
6217
6218/*
6219 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
6220 * causes regular reference counting to break down. We rely on the UNIX
6221 * garbage collection to take care of this problem for us.
6222 */
6223static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6224{
6225 unsigned left, total;
6226 int ret = 0;
6227
6228 total = 0;
6229 left = ctx->nr_user_files;
6230 while (left) {
6231 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
6232
6233 ret = __io_sqe_files_scm(ctx, this_files, total);
6234 if (ret)
6235 break;
6236 left -= this_files;
6237 total += this_files;
6238 }
6239
6240 if (!ret)
6241 return 0;
6242
6243 while (total < ctx->nr_user_files) {
65e19f54
JA
6244 struct file *file = io_file_from_index(ctx, total);
6245
6246 if (file)
6247 fput(file);
6b06314c
JA
6248 total++;
6249 }
6250
6251 return ret;
6252}
6253#else
6254static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6255{
6256 return 0;
6257}
6258#endif
6259
65e19f54
JA
6260static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
6261 unsigned nr_files)
6262{
6263 int i;
6264
6265 for (i = 0; i < nr_tables; i++) {
05f3fb3c 6266 struct fixed_file_table *table = &ctx->file_data->table[i];
65e19f54
JA
6267 unsigned this_files;
6268
6269 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
6270 table->files = kcalloc(this_files, sizeof(struct file *),
6271 GFP_KERNEL);
6272 if (!table->files)
6273 break;
6274 nr_files -= this_files;
6275 }
6276
6277 if (i == nr_tables)
6278 return 0;
6279
6280 for (i = 0; i < nr_tables; i++) {
05f3fb3c 6281 struct fixed_file_table *table = &ctx->file_data->table[i];
65e19f54
JA
6282 kfree(table->files);
6283 }
6284 return 1;
6285}
6286
05f3fb3c
JA
6287static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
6288{
6289#if defined(CONFIG_UNIX)
6290 struct sock *sock = ctx->ring_sock->sk;
6291 struct sk_buff_head list, *head = &sock->sk_receive_queue;
6292 struct sk_buff *skb;
6293 int i;
6294
6295 __skb_queue_head_init(&list);
6296
6297 /*
6298 * Find the skb that holds this file in its SCM_RIGHTS. When found,
6299 * remove this entry and rearrange the file array.
6300 */
6301 skb = skb_dequeue(head);
6302 while (skb) {
6303 struct scm_fp_list *fp;
6304
6305 fp = UNIXCB(skb).fp;
6306 for (i = 0; i < fp->count; i++) {
6307 int left;
6308
6309 if (fp->fp[i] != file)
6310 continue;
6311
6312 unix_notinflight(fp->user, fp->fp[i]);
6313 left = fp->count - 1 - i;
6314 if (left) {
6315 memmove(&fp->fp[i], &fp->fp[i + 1],
6316 left * sizeof(struct file *));
6317 }
6318 fp->count--;
6319 if (!fp->count) {
6320 kfree_skb(skb);
6321 skb = NULL;
6322 } else {
6323 __skb_queue_tail(&list, skb);
6324 }
6325 fput(file);
6326 file = NULL;
6327 break;
6328 }
6329
6330 if (!file)
6331 break;
6332
6333 __skb_queue_tail(&list, skb);
6334
6335 skb = skb_dequeue(head);
6336 }
6337
6338 if (skb_peek(&list)) {
6339 spin_lock_irq(&head->lock);
6340 while ((skb = __skb_dequeue(&list)) != NULL)
6341 __skb_queue_tail(head, skb);
6342 spin_unlock_irq(&head->lock);
6343 }
6344#else
6345 fput(file);
6346#endif
6347}
6348
6349struct io_file_put {
6350 struct llist_node llist;
6351 struct file *file;
05f3fb3c
JA
6352};
6353
2faf852d 6354static void io_ring_file_ref_flush(struct fixed_file_data *data)
65e19f54 6355{
05f3fb3c 6356 struct io_file_put *pfile, *tmp;
05f3fb3c 6357 struct llist_node *node;
65e19f54 6358
05f3fb3c
JA
6359 while ((node = llist_del_all(&data->put_llist)) != NULL) {
6360 llist_for_each_entry_safe(pfile, tmp, node, llist) {
6361 io_ring_file_put(data->ctx, pfile->file);
a5318d3c 6362 kfree(pfile);
05f3fb3c 6363 }
65e19f54 6364 }
2faf852d 6365}
65e19f54 6366
2faf852d
JA
6367static void io_ring_file_ref_switch(struct work_struct *work)
6368{
6369 struct fixed_file_data *data;
65e19f54 6370
2faf852d
JA
6371 data = container_of(work, struct fixed_file_data, ref_work);
6372 io_ring_file_ref_flush(data);
05f3fb3c
JA
6373 percpu_ref_switch_to_percpu(&data->refs);
6374}
65e19f54 6375
05f3fb3c
JA
6376static void io_file_data_ref_zero(struct percpu_ref *ref)
6377{
6378 struct fixed_file_data *data;
6379
6380 data = container_of(ref, struct fixed_file_data, refs);
6381
2faf852d
JA
6382 /*
6383 * We can't safely switch from inside this context, punt to wq. If
6384 * the table ref is going away, the table is being unregistered.
6385 * Don't queue up the async work for that case, the caller will
6386 * handle it.
6387 */
6388 if (!percpu_ref_is_dying(&data->refs))
6389 queue_work(system_wq, &data->ref_work);
65e19f54
JA
6390}
6391
6b06314c
JA
6392static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
6393 unsigned nr_args)
6394{
6395 __s32 __user *fds = (__s32 __user *) arg;
65e19f54 6396 unsigned nr_tables;
05f3fb3c 6397 struct file *file;
6b06314c
JA
6398 int fd, ret = 0;
6399 unsigned i;
6400
05f3fb3c 6401 if (ctx->file_data)
6b06314c
JA
6402 return -EBUSY;
6403 if (!nr_args)
6404 return -EINVAL;
6405 if (nr_args > IORING_MAX_FIXED_FILES)
6406 return -EMFILE;
6407
05f3fb3c
JA
6408 ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
6409 if (!ctx->file_data)
6410 return -ENOMEM;
6411 ctx->file_data->ctx = ctx;
6412 init_completion(&ctx->file_data->done);
6413
65e19f54 6414 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
05f3fb3c
JA
6415 ctx->file_data->table = kcalloc(nr_tables,
6416 sizeof(struct fixed_file_table),
65e19f54 6417 GFP_KERNEL);
05f3fb3c
JA
6418 if (!ctx->file_data->table) {
6419 kfree(ctx->file_data);
6420 ctx->file_data = NULL;
6b06314c 6421 return -ENOMEM;
05f3fb3c
JA
6422 }
6423
6424 if (percpu_ref_init(&ctx->file_data->refs, io_file_data_ref_zero,
6425 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
6426 kfree(ctx->file_data->table);
6427 kfree(ctx->file_data);
6428 ctx->file_data = NULL;
6b06314c 6429 return -ENOMEM;
05f3fb3c
JA
6430 }
6431 ctx->file_data->put_llist.first = NULL;
6432 INIT_WORK(&ctx->file_data->ref_work, io_ring_file_ref_switch);
6b06314c 6433
65e19f54 6434 if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
05f3fb3c
JA
6435 percpu_ref_exit(&ctx->file_data->refs);
6436 kfree(ctx->file_data->table);
6437 kfree(ctx->file_data);
6438 ctx->file_data = NULL;
65e19f54
JA
6439 return -ENOMEM;
6440 }
6441
08a45173 6442 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
65e19f54
JA
6443 struct fixed_file_table *table;
6444 unsigned index;
6445
6b06314c
JA
6446 ret = -EFAULT;
6447 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
6448 break;
08a45173
JA
6449 /* allow sparse sets */
6450 if (fd == -1) {
6451 ret = 0;
6452 continue;
6453 }
6b06314c 6454
05f3fb3c 6455 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54 6456 index = i & IORING_FILE_TABLE_MASK;
05f3fb3c 6457 file = fget(fd);
6b06314c
JA
6458
6459 ret = -EBADF;
05f3fb3c 6460 if (!file)
6b06314c 6461 break;
05f3fb3c 6462
6b06314c
JA
6463 /*
6464 * Don't allow io_uring instances to be registered. If UNIX
6465 * isn't enabled, then this causes a reference cycle and this
6466 * instance can never get freed. If UNIX is enabled we'll
6467 * handle it just fine, but there's still no point in allowing
6468 * a ring fd as it doesn't support regular read/write anyway.
6469 */
05f3fb3c
JA
6470 if (file->f_op == &io_uring_fops) {
6471 fput(file);
6b06314c
JA
6472 break;
6473 }
6b06314c 6474 ret = 0;
05f3fb3c 6475 table->files[index] = file;
6b06314c
JA
6476 }
6477
6478 if (ret) {
65e19f54 6479 for (i = 0; i < ctx->nr_user_files; i++) {
65e19f54
JA
6480 file = io_file_from_index(ctx, i);
6481 if (file)
6482 fput(file);
6483 }
6484 for (i = 0; i < nr_tables; i++)
05f3fb3c 6485 kfree(ctx->file_data->table[i].files);
6b06314c 6486
05f3fb3c
JA
6487 kfree(ctx->file_data->table);
6488 kfree(ctx->file_data);
6489 ctx->file_data = NULL;
6b06314c
JA
6490 ctx->nr_user_files = 0;
6491 return ret;
6492 }
6493
6494 ret = io_sqe_files_scm(ctx);
6495 if (ret)
6496 io_sqe_files_unregister(ctx);
6497
6498 return ret;
6499}
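/*
 * Userspace-side sketch: registering a fixed file set that lands in the
 * function above. Hedged example using the raw register syscall; sock_fd
 * and file_fd are placeholders and error handling is omitted.
 *
 *	int fds[2] = { sock_fd, file_fd };	// -1 entries create sparse slots
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES, fds, 2);
 *	// SQEs can now set IOSQE_FIXED_FILE with sqe->fd as the slot index
 */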
6500
c3a31e60
JA
6501static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
6502 int index)
6503{
6504#if defined(CONFIG_UNIX)
6505 struct sock *sock = ctx->ring_sock->sk;
6506 struct sk_buff_head *head = &sock->sk_receive_queue;
6507 struct sk_buff *skb;
6508
6509 /*
6510 * See if we can merge this file into an existing skb SCM_RIGHTS
6511 * file set. If there's no room, fall back to allocating a new skb
6512 * and filling it in.
6513 */
6514 spin_lock_irq(&head->lock);
6515 skb = skb_peek(head);
6516 if (skb) {
6517 struct scm_fp_list *fpl = UNIXCB(skb).fp;
6518
6519 if (fpl->count < SCM_MAX_FD) {
6520 __skb_unlink(skb, head);
6521 spin_unlock_irq(&head->lock);
6522 fpl->fp[fpl->count] = get_file(file);
6523 unix_inflight(fpl->user, fpl->fp[fpl->count]);
6524 fpl->count++;
6525 spin_lock_irq(&head->lock);
6526 __skb_queue_head(head, skb);
6527 } else {
6528 skb = NULL;
6529 }
6530 }
6531 spin_unlock_irq(&head->lock);
6532
6533 if (skb) {
6534 fput(file);
6535 return 0;
6536 }
6537
6538 return __io_sqe_files_scm(ctx, 1, index);
6539#else
6540 return 0;
6541#endif
6542}
6543
05f3fb3c 6544static void io_atomic_switch(struct percpu_ref *ref)
c3a31e60 6545{
05f3fb3c
JA
6546 struct fixed_file_data *data;
6547
dd3db2a3
JA
6548 /*
6549 * Juggle reference to ensure we hit zero, if needed, so we can
6550 * switch back to percpu mode
6551 */
05f3fb3c 6552 data = container_of(ref, struct fixed_file_data, refs);
dd3db2a3
JA
6553 percpu_ref_put(&data->refs);
6554 percpu_ref_get(&data->refs);
05f3fb3c
JA
6555}
6556
a5318d3c 6557static int io_queue_file_removal(struct fixed_file_data *data,
05f3fb3c
JA
6558 struct file *file)
6559{
a5318d3c 6560 struct io_file_put *pfile;
05f3fb3c 6561
05f3fb3c 6562 pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
a5318d3c
HD
6563 if (!pfile)
6564 return -ENOMEM;
05f3fb3c
JA
6565
6566 pfile->file = file;
6567 llist_add(&pfile->llist, &data->put_llist);
a5318d3c 6568 return 0;
05f3fb3c
JA
6569}
6570
6571static int __io_sqe_files_update(struct io_ring_ctx *ctx,
6572 struct io_uring_files_update *up,
6573 unsigned nr_args)
6574{
6575 struct fixed_file_data *data = ctx->file_data;
6576 bool ref_switch = false;
6577 struct file *file;
c3a31e60
JA
6578 __s32 __user *fds;
6579 int fd, i, err;
6580 __u32 done;
6581
05f3fb3c 6582 if (check_add_overflow(up->offset, nr_args, &done))
c3a31e60
JA
6583 return -EOVERFLOW;
6584 if (done > ctx->nr_user_files)
6585 return -EINVAL;
6586
6587 done = 0;
05f3fb3c 6588 fds = u64_to_user_ptr(up->fds);
c3a31e60 6589 while (nr_args) {
65e19f54
JA
6590 struct fixed_file_table *table;
6591 unsigned index;
6592
c3a31e60
JA
6593 err = 0;
6594 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
6595 err = -EFAULT;
6596 break;
6597 }
05f3fb3c
JA
6598 i = array_index_nospec(up->offset, ctx->nr_user_files);
6599 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54
JA
6600 index = i & IORING_FILE_TABLE_MASK;
6601 if (table->files[index]) {
05f3fb3c 6602 file = io_file_from_index(ctx, index);
a5318d3c
HD
6603 err = io_queue_file_removal(data, file);
6604 if (err)
6605 break;
65e19f54 6606 table->files[index] = NULL;
a5318d3c 6607 ref_switch = true;
c3a31e60
JA
6608 }
6609 if (fd != -1) {
c3a31e60
JA
6610 file = fget(fd);
6611 if (!file) {
6612 err = -EBADF;
6613 break;
6614 }
6615 /*
6616 * Don't allow io_uring instances to be registered. If
6617 * UNIX isn't enabled, then this causes a reference
6618 * cycle and this instance can never get freed. If UNIX
6619 * is enabled we'll handle it just fine, but there's
6620 * still no point in allowing a ring fd as it doesn't
6621 * support regular read/write anyway.
6622 */
6623 if (file->f_op == &io_uring_fops) {
6624 fput(file);
6625 err = -EBADF;
6626 break;
6627 }
65e19f54 6628 table->files[index] = file;
c3a31e60
JA
6629 err = io_sqe_file_register(ctx, file, i);
6630 if (err)
6631 break;
6632 }
6633 nr_args--;
6634 done++;
05f3fb3c
JA
6635 up->offset++;
6636 }
6637
dd3db2a3 6638 if (ref_switch)
05f3fb3c 6639 percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);
c3a31e60
JA
6640
6641 return done ? done : err;
6642}
05f3fb3c
JA
6643static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
6644 unsigned nr_args)
6645{
6646 struct io_uring_files_update up;
6647
6648 if (!ctx->file_data)
6649 return -ENXIO;
6650 if (!nr_args)
6651 return -EINVAL;
6652 if (copy_from_user(&up, arg, sizeof(up)))
6653 return -EFAULT;
6654 if (up.resv)
6655 return -EINVAL;
6656
6657 return __io_sqe_files_update(ctx, &up, nr_args);
6658}
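/*
 * Userspace-side sketch of the update path above, again via the raw
 * register syscall. A hedged example; new_fd and the slot offset are
 * placeholders.
 *
 *	struct io_uring_files_update up = {
 *		.offset	= 3,				// first slot to update
 *		.fds	= (unsigned long) &new_fd,	// an fd of -1 clears a slot
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES_UPDATE,
 *		&up, 1);
 */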
c3a31e60 6659
e9fd9396 6660static void io_free_work(struct io_wq_work *work)
7d723065
JA
6661{
6662 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6663
e9fd9396 6664 /* Consider that io_steal_work() relies on this ref */
7d723065
JA
6665 io_put_req(req);
6666}
6667
24369c2e
PB
6668static int io_init_wq_offload(struct io_ring_ctx *ctx,
6669 struct io_uring_params *p)
6670{
6671 struct io_wq_data data;
6672 struct fd f;
6673 struct io_ring_ctx *ctx_attach;
6674 unsigned int concurrency;
6675 int ret = 0;
6676
6677 data.user = ctx->user;
e9fd9396 6678 data.free_work = io_free_work;
24369c2e
PB
6679
6680 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
6681 /* Do QD, or 4 * CPUS, whatever is smallest */
6682 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
6683
6684 ctx->io_wq = io_wq_create(concurrency, &data);
6685 if (IS_ERR(ctx->io_wq)) {
6686 ret = PTR_ERR(ctx->io_wq);
6687 ctx->io_wq = NULL;
6688 }
6689 return ret;
6690 }
6691
6692 f = fdget(p->wq_fd);
6693 if (!f.file)
6694 return -EBADF;
6695
6696 if (f.file->f_op != &io_uring_fops) {
6697 ret = -EINVAL;
6698 goto out_fput;
6699 }
6700
6701 ctx_attach = f.file->private_data;
6702 /* @io_wq is protected by holding the fd */
6703 if (!io_wq_get(ctx_attach->io_wq, &data)) {
6704 ret = -EINVAL;
6705 goto out_fput;
6706 }
6707
6708 ctx->io_wq = ctx_attach->io_wq;
6709out_fput:
6710 fdput(f);
6711 return ret;
6712}
6713
6c271ce2
JA
6714static int io_sq_offload_start(struct io_ring_ctx *ctx,
6715 struct io_uring_params *p)
2b188cc1
JA
6716{
6717 int ret;
6718
6c271ce2 6719 init_waitqueue_head(&ctx->sqo_wait);
2b188cc1
JA
6720 mmgrab(current->mm);
6721 ctx->sqo_mm = current->mm;
6722
6c271ce2 6723 if (ctx->flags & IORING_SETUP_SQPOLL) {
3ec482d1
JA
6724 ret = -EPERM;
6725 if (!capable(CAP_SYS_ADMIN))
6726 goto err;
6727
917257da
JA
6728 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
6729 if (!ctx->sq_thread_idle)
6730 ctx->sq_thread_idle = HZ;
6731
6c271ce2 6732 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 6733 int cpu = p->sq_thread_cpu;
6c271ce2 6734
917257da 6735 ret = -EINVAL;
44a9bd18
JA
6736 if (cpu >= nr_cpu_ids)
6737 goto err;
7889f44d 6738 if (!cpu_online(cpu))
917257da
JA
6739 goto err;
6740
6c271ce2
JA
6741 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
6742 ctx, cpu,
6743 "io_uring-sq");
6744 } else {
6745 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
6746 "io_uring-sq");
6747 }
6748 if (IS_ERR(ctx->sqo_thread)) {
6749 ret = PTR_ERR(ctx->sqo_thread);
6750 ctx->sqo_thread = NULL;
6751 goto err;
6752 }
6753 wake_up_process(ctx->sqo_thread);
6754 } else if (p->flags & IORING_SETUP_SQ_AFF) {
6755 /* Can't have SQ_AFF without SQPOLL */
6756 ret = -EINVAL;
6757 goto err;
6758 }
6759
24369c2e
PB
6760 ret = io_init_wq_offload(ctx, p);
6761 if (ret)
2b188cc1 6762 goto err;
2b188cc1
JA
6763
6764 return 0;
6765err:
54a91f3b 6766 io_finish_async(ctx);
2b188cc1
JA
6767 mmdrop(ctx->sqo_mm);
6768 ctx->sqo_mm = NULL;
6769 return ret;
6770}
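/*
 * Userspace-side sketch: the setup flags consumed above. A hedged example
 * of creating an SQPOLL ring pinned to CPU 0; the 128 entries and 2000ms
 * idle value are arbitrary placeholders.
 *
 *	struct io_uring_params p = {
 *		.flags		= IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF,
 *		.sq_thread_cpu	= 0,
 *		.sq_thread_idle	= 2000,		// msecs before the thread idles
 *	};
 *
 *	int ring_fd = syscall(__NR_io_uring_setup, 128, &p);
 */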
6771
6772static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
6773{
6774 atomic_long_sub(nr_pages, &user->locked_vm);
6775}
6776
6777static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
6778{
6779 unsigned long page_limit, cur_pages, new_pages;
6780
6781 /* Don't allow more pages than we can safely lock */
6782 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
6783
6784 do {
6785 cur_pages = atomic_long_read(&user->locked_vm);
6786 new_pages = cur_pages + nr_pages;
6787 if (new_pages > page_limit)
6788 return -ENOMEM;
6789 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
6790 new_pages) != cur_pages);
6791
6792 return 0;
6793}
6794
6795static void io_mem_free(void *ptr)
6796{
52e04ef4
MR
6797 struct page *page;
6798
6799 if (!ptr)
6800 return;
2b188cc1 6801
52e04ef4 6802 page = virt_to_head_page(ptr);
2b188cc1
JA
6803 if (put_page_testzero(page))
6804 free_compound_page(page);
6805}
6806
6807static void *io_mem_alloc(size_t size)
6808{
6809 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
6810 __GFP_NORETRY;
6811
6812 return (void *) __get_free_pages(gfp_flags, get_order(size));
6813}
6814
75b28aff
HV
6815static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
6816 size_t *sq_offset)
6817{
6818 struct io_rings *rings;
6819 size_t off, sq_array_size;
6820
6821 off = struct_size(rings, cqes, cq_entries);
6822 if (off == SIZE_MAX)
6823 return SIZE_MAX;
6824
6825#ifdef CONFIG_SMP
6826 off = ALIGN(off, SMP_CACHE_BYTES);
6827 if (off == 0)
6828 return SIZE_MAX;
6829#endif
6830
6831 sq_array_size = array_size(sizeof(u32), sq_entries);
6832 if (sq_array_size == SIZE_MAX)
6833 return SIZE_MAX;
6834
6835 if (check_add_overflow(off, sq_array_size, &off))
6836 return SIZE_MAX;
6837
6838 if (sq_offset)
6839 *sq_offset = off;
6840
6841 return off;
6842}
6843
2b188cc1
JA
6844static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
6845{
75b28aff 6846 size_t pages;
2b188cc1 6847
75b28aff
HV
6848 pages = (size_t)1 << get_order(
6849 rings_size(sq_entries, cq_entries, NULL));
6850 pages += (size_t)1 << get_order(
6851 array_size(sizeof(struct io_uring_sqe), sq_entries));
2b188cc1 6852
75b28aff 6853 return pages;
2b188cc1
JA
6854}
6855
edafccee
JA
6856static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
6857{
6858 int i, j;
6859
6860 if (!ctx->user_bufs)
6861 return -ENXIO;
6862
6863 for (i = 0; i < ctx->nr_user_bufs; i++) {
6864 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
6865
6866 for (j = 0; j < imu->nr_bvecs; j++)
f1f6a7dd 6867 unpin_user_page(imu->bvec[j].bv_page);
edafccee
JA
6868
6869 if (ctx->account_mem)
6870 io_unaccount_mem(ctx->user, imu->nr_bvecs);
d4ef6475 6871 kvfree(imu->bvec);
edafccee
JA
6872 imu->nr_bvecs = 0;
6873 }
6874
6875 kfree(ctx->user_bufs);
6876 ctx->user_bufs = NULL;
6877 ctx->nr_user_bufs = 0;
6878 return 0;
6879}
6880
6881static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
6882 void __user *arg, unsigned index)
6883{
6884 struct iovec __user *src;
6885
6886#ifdef CONFIG_COMPAT
6887 if (ctx->compat) {
6888 struct compat_iovec __user *ciovs;
6889 struct compat_iovec ciov;
6890
6891 ciovs = (struct compat_iovec __user *) arg;
6892 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
6893 return -EFAULT;
6894
d55e5f5b 6895 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
6896 dst->iov_len = ciov.iov_len;
6897 return 0;
6898 }
6899#endif
6900 src = (struct iovec __user *) arg;
6901 if (copy_from_user(dst, &src[index], sizeof(*dst)))
6902 return -EFAULT;
6903 return 0;
6904}
6905
6906static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
6907 unsigned nr_args)
6908{
6909 struct vm_area_struct **vmas = NULL;
6910 struct page **pages = NULL;
6911 int i, j, got_pages = 0;
6912 int ret = -EINVAL;
6913
6914 if (ctx->user_bufs)
6915 return -EBUSY;
6916 if (!nr_args || nr_args > UIO_MAXIOV)
6917 return -EINVAL;
6918
6919 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
6920 GFP_KERNEL);
6921 if (!ctx->user_bufs)
6922 return -ENOMEM;
6923
6924 for (i = 0; i < nr_args; i++) {
6925 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
6926 unsigned long off, start, end, ubuf;
6927 int pret, nr_pages;
6928 struct iovec iov;
6929 size_t size;
6930
6931 ret = io_copy_iov(ctx, &iov, arg, i);
6932 if (ret)
a278682d 6933 goto err;
edafccee
JA
6934
6935 /*
6936 * Don't impose further limits on the size and buffer
6937 * constraints here, we'll -EINVAL later when IO is
6938 * submitted if they are wrong.
6939 */
6940 ret = -EFAULT;
6941 if (!iov.iov_base || !iov.iov_len)
6942 goto err;
6943
6944 /* arbitrary limit, but we need something */
6945 if (iov.iov_len > SZ_1G)
6946 goto err;
6947
6948 ubuf = (unsigned long) iov.iov_base;
6949 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
6950 start = ubuf >> PAGE_SHIFT;
6951 nr_pages = end - start;
6952
6953 if (ctx->account_mem) {
6954 ret = io_account_mem(ctx->user, nr_pages);
6955 if (ret)
6956 goto err;
6957 }
6958
6959 ret = 0;
6960 if (!pages || nr_pages > got_pages) {
6961 kfree(vmas);
6962 kfree(pages);
d4ef6475 6963 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
edafccee 6964 GFP_KERNEL);
d4ef6475 6965 vmas = kvmalloc_array(nr_pages,
edafccee
JA
6966 sizeof(struct vm_area_struct *),
6967 GFP_KERNEL);
6968 if (!pages || !vmas) {
6969 ret = -ENOMEM;
6970 if (ctx->account_mem)
6971 io_unaccount_mem(ctx->user, nr_pages);
6972 goto err;
6973 }
6974 got_pages = nr_pages;
6975 }
6976
d4ef6475 6977 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
edafccee
JA
6978 GFP_KERNEL);
6979 ret = -ENOMEM;
6980 if (!imu->bvec) {
6981 if (ctx->account_mem)
6982 io_unaccount_mem(ctx->user, nr_pages);
6983 goto err;
6984 }
6985
6986 ret = 0;
6987 down_read(&current->mm->mmap_sem);
2113b05d 6988 pret = pin_user_pages(ubuf, nr_pages,
932f4a63
IW
6989 FOLL_WRITE | FOLL_LONGTERM,
6990 pages, vmas);
edafccee
JA
6991 if (pret == nr_pages) {
6992 /* don't support file backed memory */
6993 for (j = 0; j < nr_pages; j++) {
6994 struct vm_area_struct *vma = vmas[j];
6995
6996 if (vma->vm_file &&
6997 !is_file_hugepages(vma->vm_file)) {
6998 ret = -EOPNOTSUPP;
6999 break;
7000 }
7001 }
7002 } else {
7003 ret = pret < 0 ? pret : -EFAULT;
7004 }
7005 up_read(&current->mm->mmap_sem);
7006 if (ret) {
7007 /*
7008 * if we did partial map, or found file backed vmas,
7009 * release any pages we did get
7010 */
27c4d3a3 7011 if (pret > 0)
f1f6a7dd 7012 unpin_user_pages(pages, pret);
edafccee
JA
7013 if (ctx->account_mem)
7014 io_unaccount_mem(ctx->user, nr_pages);
d4ef6475 7015 kvfree(imu->bvec);
edafccee
JA
7016 goto err;
7017 }
7018
7019 off = ubuf & ~PAGE_MASK;
7020 size = iov.iov_len;
7021 for (j = 0; j < nr_pages; j++) {
7022 size_t vec_len;
7023
7024 vec_len = min_t(size_t, size, PAGE_SIZE - off);
7025 imu->bvec[j].bv_page = pages[j];
7026 imu->bvec[j].bv_len = vec_len;
7027 imu->bvec[j].bv_offset = off;
7028 off = 0;
7029 size -= vec_len;
7030 }
7031 /* store original address for later verification */
7032 imu->ubuf = ubuf;
7033 imu->len = iov.iov_len;
7034 imu->nr_bvecs = nr_pages;
7035
7036 ctx->nr_user_bufs++;
7037 }
d4ef6475
MR
7038 kvfree(pages);
7039 kvfree(vmas);
edafccee
JA
7040 return 0;
7041err:
d4ef6475
MR
7042 kvfree(pages);
7043 kvfree(vmas);
edafccee
JA
7044 io_sqe_buffer_unregister(ctx);
7045 return ret;
7046}
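/*
 * Userspace-side sketch: registering the fixed buffers pinned above.
 * Hedged example; buf and len are placeholders, and the memory must be
 * anonymous (not file-backed) per the checks above.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_BUFFERS, &iov, 1);
 *	// IORING_OP_READ_FIXED/WRITE_FIXED can then reference buf_index 0
 */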
7047
9b402849
JA
7048static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
7049{
7050 __s32 __user *fds = arg;
7051 int fd;
7052
7053 if (ctx->cq_ev_fd)
7054 return -EBUSY;
7055
7056 if (copy_from_user(&fd, fds, sizeof(*fds)))
7057 return -EFAULT;
7058
7059 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
7060 if (IS_ERR(ctx->cq_ev_fd)) {
7061 int ret = PTR_ERR(ctx->cq_ev_fd);
7062 ctx->cq_ev_fd = NULL;
7063 return ret;
7064 }
7065
7066 return 0;
7067}
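/*
 * Userspace-side sketch for the eventfd registration above, hedged:
 *
 *	int efd = eventfd(0, 0);
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_EVENTFD, &efd, 1);
 *	// completions now also signal efd, so it can be poll()ed or epoll()ed
 */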
7068
7069static int io_eventfd_unregister(struct io_ring_ctx *ctx)
7070{
7071 if (ctx->cq_ev_fd) {
7072 eventfd_ctx_put(ctx->cq_ev_fd);
7073 ctx->cq_ev_fd = NULL;
7074 return 0;
7075 }
7076
7077 return -ENXIO;
7078}
7079
5a2e745d
JA
7080static int __io_destroy_buffers(int id, void *p, void *data)
7081{
7082 struct io_ring_ctx *ctx = data;
7083 struct io_buffer *buf = p;
7084
067524e9 7085 __io_remove_buffers(ctx, buf, id, -1U);
5a2e745d
JA
7086 return 0;
7087}
7088
7089static void io_destroy_buffers(struct io_ring_ctx *ctx)
7090{
7091 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
7092 idr_destroy(&ctx->io_buffer_idr);
7093}
7094
2b188cc1
JA
7095static void io_ring_ctx_free(struct io_ring_ctx *ctx)
7096{
6b06314c 7097 io_finish_async(ctx);
2b188cc1
JA
7098 if (ctx->sqo_mm)
7099 mmdrop(ctx->sqo_mm);
def596e9
JA
7100
7101 io_iopoll_reap_events(ctx);
edafccee 7102 io_sqe_buffer_unregister(ctx);
6b06314c 7103 io_sqe_files_unregister(ctx);
9b402849 7104 io_eventfd_unregister(ctx);
5a2e745d 7105 io_destroy_buffers(ctx);
41726c9a 7106 idr_destroy(&ctx->personality_idr);
def596e9 7107
2b188cc1 7108#if defined(CONFIG_UNIX)
355e8d26
EB
7109 if (ctx->ring_sock) {
7110 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 7111 sock_release(ctx->ring_sock);
355e8d26 7112 }
2b188cc1
JA
7113#endif
7114
75b28aff 7115 io_mem_free(ctx->rings);
2b188cc1 7116 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
7117
7118 percpu_ref_exit(&ctx->refs);
7119 if (ctx->account_mem)
7120 io_unaccount_mem(ctx->user,
7121 ring_pages(ctx->sq_entries, ctx->cq_entries));
7122 free_uid(ctx->user);
181e448d 7123 put_cred(ctx->creds);
206aefde 7124 kfree(ctx->completions);
78076bb6 7125 kfree(ctx->cancel_hash);
0ddf92e8 7126 kmem_cache_free(req_cachep, ctx->fallback_req);
2b188cc1
JA
7127 kfree(ctx);
7128}
7129
7130static __poll_t io_uring_poll(struct file *file, poll_table *wait)
7131{
7132 struct io_ring_ctx *ctx = file->private_data;
7133 __poll_t mask = 0;
7134
7135 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
7136 /*
7137 * synchronizes with barrier from wq_has_sleeper call in
7138 * io_commit_cqring
7139 */
2b188cc1 7140 smp_rmb();
75b28aff
HV
7141 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
7142 ctx->rings->sq_ring_entries)
2b188cc1 7143 mask |= EPOLLOUT | EPOLLWRNORM;
63e5d81f 7144 if (io_cqring_events(ctx, false))
2b188cc1
JA
7145 mask |= EPOLLIN | EPOLLRDNORM;
7146
7147 return mask;
7148}
7149
7150static int io_uring_fasync(int fd, struct file *file, int on)
7151{
7152 struct io_ring_ctx *ctx = file->private_data;
7153
7154 return fasync_helper(fd, file, on, &ctx->cq_fasync);
7155}
7156
071698e1
JA
7157static int io_remove_personalities(int id, void *p, void *data)
7158{
7159 struct io_ring_ctx *ctx = data;
7160 const struct cred *cred;
7161
7162 cred = idr_remove(&ctx->personality_idr, id);
7163 if (cred)
7164 put_cred(cred);
7165 return 0;
7166}
7167
2b188cc1
JA
7168static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
7169{
7170 mutex_lock(&ctx->uring_lock);
7171 percpu_ref_kill(&ctx->refs);
7172 mutex_unlock(&ctx->uring_lock);
7173
df069d80
JA
7174 /*
7175 * Wait for sq thread to idle, if we have one. It won't spin on new
7176 * work after we've killed the ctx ref above. This is important to do
7177 * before we cancel existing commands, as the thread could otherwise
7178 * be queueing new work post that. If that's work we need to cancel,
7179 * it could cause shutdown to hang.
7180 */
7181 while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
7182 cpu_relax();
7183
5262f567 7184 io_kill_timeouts(ctx);
221c5eb2 7185 io_poll_remove_all(ctx);
561fb04a
JA
7186
7187 if (ctx->io_wq)
7188 io_wq_cancel_all(ctx->io_wq);
7189
def596e9 7190 io_iopoll_reap_events(ctx);
15dff286
JA
7191 /* if we failed setting up the ctx, we might not have any rings */
7192 if (ctx->rings)
7193 io_cqring_overflow_flush(ctx, true);
071698e1 7194 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
206aefde 7195 wait_for_completion(&ctx->completions[0]);
2b188cc1
JA
7196 io_ring_ctx_free(ctx);
7197}
7198
7199static int io_uring_release(struct inode *inode, struct file *file)
7200{
7201 struct io_ring_ctx *ctx = file->private_data;
7202
7203 file->private_data = NULL;
7204 io_ring_ctx_wait_and_kill(ctx);
7205 return 0;
7206}
7207
fcb323cc
JA
7208static void io_uring_cancel_files(struct io_ring_ctx *ctx,
7209 struct files_struct *files)
7210{
7211 struct io_kiocb *req;
7212 DEFINE_WAIT(wait);
7213
7214 while (!list_empty_careful(&ctx->inflight_list)) {
768134d4 7215 struct io_kiocb *cancel_req = NULL;
fcb323cc
JA
7216
7217 spin_lock_irq(&ctx->inflight_lock);
7218 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
768134d4
JA
7219 if (req->work.files != files)
7220 continue;
7221 /* req is being completed, ignore */
7222 if (!refcount_inc_not_zero(&req->refs))
7223 continue;
7224 cancel_req = req;
7225 break;
fcb323cc 7226 }
768134d4 7227 if (cancel_req)
fcb323cc 7228 prepare_to_wait(&ctx->inflight_wait, &wait,
768134d4 7229 TASK_UNINTERRUPTIBLE);
fcb323cc
JA
7230 spin_unlock_irq(&ctx->inflight_lock);
7231
768134d4
JA
7232 /* We need to keep going until we don't find a matching req */
7233 if (!cancel_req)
fcb323cc 7234 break;
2f6d9b9d 7235
2ca10259
JA
7236 if (cancel_req->flags & REQ_F_OVERFLOW) {
7237 spin_lock_irq(&ctx->completion_lock);
7238 list_del(&cancel_req->list);
7239 cancel_req->flags &= ~REQ_F_OVERFLOW;
7240 if (list_empty(&ctx->cq_overflow_list)) {
7241 clear_bit(0, &ctx->sq_check_overflow);
7242 clear_bit(0, &ctx->cq_check_overflow);
7243 }
7244 spin_unlock_irq(&ctx->completion_lock);
7245
7246 WRITE_ONCE(ctx->rings->cq_overflow,
7247 atomic_inc_return(&ctx->cached_cq_overflow));
7248
7249 /*
7250 * Put inflight ref and overflow ref. If that's
7251 * all we had, then we're done with this request.
7252 */
7253 if (refcount_sub_and_test(2, &cancel_req->refs)) {
7254 io_put_req(cancel_req);
7255 continue;
7256 }
7257 }
7258
2f6d9b9d
BL
7259 io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
7260 io_put_req(cancel_req);
fcb323cc
JA
7261 schedule();
7262 }
768134d4 7263 finish_wait(&ctx->inflight_wait, &wait);
fcb323cc
JA
7264}
7265
7266static int io_uring_flush(struct file *file, void *data)
7267{
7268 struct io_ring_ctx *ctx = file->private_data;
7269
7270 io_uring_cancel_files(ctx, data);
6ab23144
JA
7271
7272 /*
7273 * If the task is going away, cancel work it may have pending
7274 */
7275 if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
7276 io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));
7277
fcb323cc
JA
7278 return 0;
7279}
7280
6c5c240e
RP
7281static void *io_uring_validate_mmap_request(struct file *file,
7282 loff_t pgoff, size_t sz)
2b188cc1 7283{
2b188cc1 7284 struct io_ring_ctx *ctx = file->private_data;
6c5c240e 7285 loff_t offset = pgoff << PAGE_SHIFT;
2b188cc1
JA
7286 struct page *page;
7287 void *ptr;
7288
7289 switch (offset) {
7290 case IORING_OFF_SQ_RING:
75b28aff
HV
7291 case IORING_OFF_CQ_RING:
7292 ptr = ctx->rings;
2b188cc1
JA
7293 break;
7294 case IORING_OFF_SQES:
7295 ptr = ctx->sq_sqes;
7296 break;
2b188cc1 7297 default:
6c5c240e 7298 return ERR_PTR(-EINVAL);
2b188cc1
JA
7299 }
7300
7301 page = virt_to_head_page(ptr);
a50b854e 7302 if (sz > page_size(page))
6c5c240e
RP
7303 return ERR_PTR(-EINVAL);
7304
7305 return ptr;
7306}
7307
7308#ifdef CONFIG_MMU
7309
7310static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
7311{
7312 size_t sz = vma->vm_end - vma->vm_start;
7313 unsigned long pfn;
7314 void *ptr;
7315
7316 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
7317 if (IS_ERR(ptr))
7318 return PTR_ERR(ptr);
2b188cc1
JA
7319
7320 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
7321 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
7322}
7323
6c5c240e
RP
7324#else /* !CONFIG_MMU */
7325
7326static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
7327{
7328 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
7329}
7330
7331static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
7332{
7333 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
7334}
7335
7336static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
7337 unsigned long addr, unsigned long len,
7338 unsigned long pgoff, unsigned long flags)
7339{
7340 void *ptr;
7341
7342 ptr = io_uring_validate_mmap_request(file, pgoff, len);
7343 if (IS_ERR(ptr))
7344 return PTR_ERR(ptr);
7345
7346 return (unsigned long) ptr;
7347}
7348
7349#endif /* !CONFIG_MMU */
7350
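/*
 * Userspace-side sketch: the mmap offsets validated above. A hedged example
 * of mapping the SQ ring; sq_ring_sz is a placeholder derived from the
 * offsets returned by io_uring_setup().
 *
 *	void *sq_ring = mmap(NULL, sq_ring_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, ring_fd,
 *			     IORING_OFF_SQ_RING);
 *	// IORING_OFF_SQES maps the SQE array, IORING_OFF_CQ_RING the CQ ring
 */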
2b188cc1
JA
7351SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
7352 u32, min_complete, u32, flags, const sigset_t __user *, sig,
7353 size_t, sigsz)
7354{
7355 struct io_ring_ctx *ctx;
7356 long ret = -EBADF;
7357 int submitted = 0;
7358 struct fd f;
7359
b41e9852
JA
7360 if (current->task_works)
7361 task_work_run();
7362
6c271ce2 7363 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
2b188cc1
JA
7364 return -EINVAL;
7365
7366 f = fdget(fd);
7367 if (!f.file)
7368 return -EBADF;
7369
7370 ret = -EOPNOTSUPP;
7371 if (f.file->f_op != &io_uring_fops)
7372 goto out_fput;
7373
7374 ret = -ENXIO;
7375 ctx = f.file->private_data;
7376 if (!percpu_ref_tryget(&ctx->refs))
7377 goto out_fput;
7378
6c271ce2
JA
7379 /*
7380 * For SQ polling, the thread will do all submissions and completions.
7381 * Just return the requested submit count, and wake the thread if
7382 * we were asked to.
7383 */
b2a9eada 7384 ret = 0;
6c271ce2 7385 if (ctx->flags & IORING_SETUP_SQPOLL) {
c1edbf5f
JA
7386 if (!list_empty_careful(&ctx->cq_overflow_list))
7387 io_cqring_overflow_flush(ctx, false);
6c271ce2
JA
7388 if (flags & IORING_ENTER_SQ_WAKEUP)
7389 wake_up(&ctx->sqo_wait);
7390 submitted = to_submit;
b2a9eada 7391 } else if (to_submit) {
ae9428ca 7392 struct mm_struct *cur_mm;
2b188cc1
JA
7393
7394 mutex_lock(&ctx->uring_lock);
ae9428ca
PB
7395 /* already have mm, so io_submit_sqes() won't try to grab it */
7396 cur_mm = ctx->sqo_mm;
7397 submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
7398 &cur_mm, false);
2b188cc1 7399 mutex_unlock(&ctx->uring_lock);
7c504e65
PB
7400
7401 if (submitted != to_submit)
7402 goto out;
2b188cc1
JA
7403 }
7404 if (flags & IORING_ENTER_GETEVENTS) {
def596e9
JA
7405 unsigned nr_events = 0;
7406
2b188cc1
JA
7407 min_complete = min(min_complete, ctx->cq_entries);
7408
32b2244a
XW
7409 /*
7410 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
7411 * space applications don't need to poll for completion events
7412 * again; they can rely on io_sq_thread to do the polling
7413 * work, which reduces cpu usage and uring_lock contention.
7414 */
7415 if (ctx->flags & IORING_SETUP_IOPOLL &&
7416 !(ctx->flags & IORING_SETUP_SQPOLL)) {
def596e9 7417 ret = io_iopoll_check(ctx, &nr_events, min_complete);
def596e9
JA
7418 } else {
7419 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
7420 }
2b188cc1
JA
7421 }
7422
7c504e65 7423out:
6805b32e 7424 percpu_ref_put(&ctx->refs);
2b188cc1
JA
7425out_fput:
7426 fdput(f);
7427 return submitted ? submitted : ret;
7428}
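/*
 * Userspace-side sketch of driving this syscall directly, hedged; most
 * applications go through liburing instead. Error handling omitted, and
 * ring_fd/to_submit are placeholders.
 *
 *	// submit the SQEs queued so far and wait for at least one completion
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *			  IORING_ENTER_GETEVENTS, NULL, 0);
 */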
7429
bebdb65e 7430#ifdef CONFIG_PROC_FS
87ce955b
JA
7431static int io_uring_show_cred(int id, void *p, void *data)
7432{
7433 const struct cred *cred = p;
7434 struct seq_file *m = data;
7435 struct user_namespace *uns = seq_user_ns(m);
7436 struct group_info *gi;
7437 kernel_cap_t cap;
7438 unsigned __capi;
7439 int g;
7440
7441 seq_printf(m, "%5d\n", id);
7442 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
7443 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
7444 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
7445 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
7446 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
7447 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
7448 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
7449 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
7450 seq_puts(m, "\n\tGroups:\t");
7451 gi = cred->group_info;
7452 for (g = 0; g < gi->ngroups; g++) {
7453 seq_put_decimal_ull(m, g ? " " : "",
7454 from_kgid_munged(uns, gi->gid[g]));
7455 }
7456 seq_puts(m, "\n\tCapEff:\t");
7457 cap = cred->cap_effective;
7458 CAP_FOR_EACH_U32(__capi)
7459 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
7460 seq_putc(m, '\n');
7461 return 0;
7462}
7463
7464static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
7465{
7466 int i;
7467
7468 mutex_lock(&ctx->uring_lock);
7469 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
7470 for (i = 0; i < ctx->nr_user_files; i++) {
7471 struct fixed_file_table *table;
7472 struct file *f;
7473
7474 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7475 f = table->files[i & IORING_FILE_TABLE_MASK];
7476 if (f)
7477 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
7478 else
7479 seq_printf(m, "%5u: <none>\n", i);
7480 }
7481 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
7482 for (i = 0; i < ctx->nr_user_bufs; i++) {
7483 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
7484
7485 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
7486 (unsigned int) buf->len);
7487 }
7488 if (!idr_is_empty(&ctx->personality_idr)) {
7489 seq_printf(m, "Personalities:\n");
7490 idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
7491 }
7492 seq_printf(m, "PollList:\n");
7493 spin_lock_irq(&ctx->completion_lock);
7494 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
7495 struct hlist_head *list = &ctx->cancel_hash[i];
7496 struct io_kiocb *req;
7497
7498 hlist_for_each_entry(req, list, hash_node)
7499 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
7500 req->task->task_works != NULL);
7501 }
7502 spin_unlock_irq(&ctx->completion_lock);
7503 mutex_unlock(&ctx->uring_lock);
7504}
7505
7506static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
7507{
7508 struct io_ring_ctx *ctx = f->private_data;
7509
7510 if (percpu_ref_tryget(&ctx->refs)) {
7511 __io_uring_show_fdinfo(ctx, m);
7512 percpu_ref_put(&ctx->refs);
7513 }
7514}
bebdb65e 7515#endif
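/*
 * The fdinfo output above is visible from userspace; a rough way to
 * inspect a live ring (pid and fd number are examples) is:
 *
 *	$ cat /proc/<pid>/fdinfo/<io_uring fd>
 *
 * which prints the UserFiles, UserBufs, Personalities and PollList
 * sections emitted by __io_uring_show_fdinfo().
 */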
87ce955b 7516
7517static const struct file_operations io_uring_fops = {
7518 .release = io_uring_release,
fcb323cc 7519 .flush = io_uring_flush,
2b188cc1 7520 .mmap = io_uring_mmap,
7521#ifndef CONFIG_MMU
7522 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
7523 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
7524#endif
7525 .poll = io_uring_poll,
7526 .fasync = io_uring_fasync,
bebdb65e 7527#ifdef CONFIG_PROC_FS
87ce955b 7528 .show_fdinfo = io_uring_show_fdinfo,
bebdb65e 7529#endif
7530};
7531
7532static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
7533 struct io_uring_params *p)
7534{
7535 struct io_rings *rings;
7536 size_t size, sq_array_offset;
2b188cc1 7537
7538 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
7539 if (size == SIZE_MAX)
7540 return -EOVERFLOW;
7541
7542 rings = io_mem_alloc(size);
7543 if (!rings)
7544 return -ENOMEM;
7545
7546 ctx->rings = rings;
7547 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
7548 rings->sq_ring_mask = p->sq_entries - 1;
7549 rings->cq_ring_mask = p->cq_entries - 1;
7550 rings->sq_ring_entries = p->sq_entries;
7551 rings->cq_ring_entries = p->cq_entries;
7552 ctx->sq_mask = rings->sq_ring_mask;
7553 ctx->cq_mask = rings->cq_ring_mask;
7554 ctx->sq_entries = rings->sq_ring_entries;
7555 ctx->cq_entries = rings->cq_ring_entries;
7556
7557 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
7558 if (size == SIZE_MAX) {
7559 io_mem_free(ctx->rings);
7560 ctx->rings = NULL;
2b188cc1 7561 return -EOVERFLOW;
eb065d30 7562 }
7563
7564 ctx->sq_sqes = io_mem_alloc(size);
7565 if (!ctx->sq_sqes) {
7566 io_mem_free(ctx->rings);
7567 ctx->rings = NULL;
2b188cc1 7568 return -ENOMEM;
eb065d30 7569 }
2b188cc1 7570
7571 return 0;
7572}
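/*
 * Illustrative note: because sq_entries/cq_entries are rounded up to a
 * power of two by io_uring_create(), the ring masks set above let both
 * the kernel and the application index the rings without a modulo. A
 * minimal userspace sketch (pointer names are illustrative):
 *
 *	unsigned tail  = *sq_tail;			// shared ring tail
 *	unsigned index = tail & *sq_ring_mask;		// slot to fill
 *	sq_array[index] = sqe_index;			// publish the sqe
 */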
7573
7574/*
7575 * Allocate an anonymous fd: this is what constitutes the application-
7576 * visible backing of an io_uring instance. The application mmaps this
7577 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
7578 * we have to tie this fd to a socket for file garbage collection purposes.
7579 */
7580static int io_uring_get_fd(struct io_ring_ctx *ctx)
7581{
7582 struct file *file;
7583 int ret;
7584
7585#if defined(CONFIG_UNIX)
7586 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
7587 &ctx->ring_sock);
7588 if (ret)
7589 return ret;
7590#endif
7591
7592 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
7593 if (ret < 0)
7594 goto err;
7595
7596 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
7597 O_RDWR | O_CLOEXEC);
7598 if (IS_ERR(file)) {
7599 put_unused_fd(ret);
7600 ret = PTR_ERR(file);
7601 goto err;
7602 }
7603
7604#if defined(CONFIG_UNIX)
7605 ctx->ring_sock->file = file;
7606#endif
7607 fd_install(ret, file);
7608 return ret;
7609err:
7610#if defined(CONFIG_UNIX)
7611 sock_release(ctx->ring_sock);
7612 ctx->ring_sock = NULL;
7613#endif
7614 return ret;
7615}
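/*
 * Illustrative userspace sketch (not kernel code): the fd returned here is
 * what the application mmaps to reach the rings, using the fixed offsets
 * from the uapi header. The sizes below are derived from the io_uring_params
 * filled in by io_uring_setup(); the variable names are illustrative only.
 *
 *	sq_ring = mmap(NULL, sq_ring_bytes, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
 *	cq_ring = mmap(NULL, cq_ring_bytes, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_CQ_RING);
 *	sqes    = mmap(NULL, sqes_bytes, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQES);
 */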
7616
7617static int io_uring_create(unsigned entries, struct io_uring_params *p)
7618{
7619 struct user_struct *user = NULL;
7620 struct io_ring_ctx *ctx;
7621 bool account_mem;
7622 int ret;
7623
8110c1a6 7624 if (!entries)
2b188cc1 7625 return -EINVAL;
7626 if (entries > IORING_MAX_ENTRIES) {
7627 if (!(p->flags & IORING_SETUP_CLAMP))
7628 return -EINVAL;
7629 entries = IORING_MAX_ENTRIES;
7630 }
7631
7632 /*
7633 * Use twice as many entries for the CQ ring. It's possible for the
7634 * application to drive a higher depth than the size of the SQ ring,
7635 * since the sqes are only used at submission time. This allows for
7636 * some flexibility in overcommitting a bit. If the application has
7637 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
7638 * of CQ ring entries manually.
7639 */
7640 p->sq_entries = roundup_pow_of_two(entries);
7641 if (p->flags & IORING_SETUP_CQSIZE) {
7642 /*
7643 * If IORING_SETUP_CQSIZE is set, we do the same roundup
7644 * to a power-of-two, if it isn't already. We do NOT impose
7645 * any cq vs sq ring sizing.
7646 */
8110c1a6 7647 if (p->cq_entries < p->sq_entries)
33a107f0 7648 return -EINVAL;
7649 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
7650 if (!(p->flags & IORING_SETUP_CLAMP))
7651 return -EINVAL;
7652 p->cq_entries = IORING_MAX_CQ_ENTRIES;
7653 }
7654 p->cq_entries = roundup_pow_of_two(p->cq_entries);
7655 } else {
7656 p->cq_entries = 2 * p->sq_entries;
7657 }
7658
7659 user = get_uid(current_user());
7660 account_mem = !capable(CAP_IPC_LOCK);
7661
7662 if (account_mem) {
7663 ret = io_account_mem(user,
7664 ring_pages(p->sq_entries, p->cq_entries));
7665 if (ret) {
7666 free_uid(user);
7667 return ret;
7668 }
7669 }
7670
7671 ctx = io_ring_ctx_alloc(p);
7672 if (!ctx) {
7673 if (account_mem)
7674 io_unaccount_mem(user, ring_pages(p->sq_entries,
7675 p->cq_entries));
7676 free_uid(user);
7677 return -ENOMEM;
7678 }
7679 ctx->compat = in_compat_syscall();
7680 ctx->account_mem = account_mem;
7681 ctx->user = user;
0b8c0ec7 7682 ctx->creds = get_current_cred();
7683
7684 ret = io_allocate_scq_urings(ctx, p);
7685 if (ret)
7686 goto err;
7687
6c271ce2 7688 ret = io_sq_offload_start(ctx, p);
7689 if (ret)
7690 goto err;
7691
2b188cc1 7692 memset(&p->sq_off, 0, sizeof(p->sq_off));
7693 p->sq_off.head = offsetof(struct io_rings, sq.head);
7694 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
7695 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
7696 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
7697 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
7698 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
7699 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
7700
7701 memset(&p->cq_off, 0, sizeof(p->cq_off));
7702 p->cq_off.head = offsetof(struct io_rings, cq.head);
7703 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
7704 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
7705 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
7706 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
7707 p->cq_off.cqes = offsetof(struct io_rings, cqes);
ac90f249 7708
7709 /*
7710 * Install ring fd as the very last thing, so we don't risk someone
7711 * having closed it before we finish setup
7712 */
7713 ret = io_uring_get_fd(ctx);
7714 if (ret < 0)
7715 goto err;
7716
da8c9690 7717 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
cccf0ee8 7718 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
d7718a9d 7719 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL;
c826bd7a 7720 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
7721 return ret;
7722err:
7723 io_ring_ctx_wait_and_kill(ctx);
7724 return ret;
7725}
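/*
 * Illustrative userspace sketch (not kernel code): the feature bits set
 * above tell the application what this kernel supports, e.g.
 *
 *	if (p.features & IORING_FEAT_SINGLE_MMAP) {
 *		// SQ and CQ rings share one mapping; mmap IORING_OFF_SQ_RING
 *		// once, using the larger of the two ring sizes
 *	}
 */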
7726
7727/*
7728 * Sets up an io_uring context and returns the fd. Applications ask for a
7729 * ring size; we return the actual sq/cq ring sizes (among other things) in
7730 * the params structure passed in.
7731 */
7732static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
7733{
7734 struct io_uring_params p;
7735 long ret;
7736 int i;
7737
7738 if (copy_from_user(&p, params, sizeof(p)))
7739 return -EFAULT;
7740 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
7741 if (p.resv[i])
7742 return -EINVAL;
7743 }
7744
6c271ce2 7745 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
8110c1a6 7746 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
24369c2e 7747 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
7748 return -EINVAL;
7749
7750 ret = io_uring_create(entries, &p);
7751 if (ret < 0)
7752 return ret;
7753
7754 if (copy_to_user(params, &p, sizeof(p)))
7755 return -EFAULT;
7756
7757 return ret;
7758}
7759
7760SYSCALL_DEFINE2(io_uring_setup, u32, entries,
7761 struct io_uring_params __user *, params)
7762{
7763 return io_uring_setup(entries, params);
7764}
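/*
 * Illustrative userspace sketch (not kernel code): setting up a ring and
 * letting the kernel round up and clamp the sizes as described above. The
 * raw syscall usage is shown; liburing wraps this in practice.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/io_uring.h>
 *
 *	struct io_uring_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.flags = IORING_SETUP_CQSIZE;		// ask for an explicit CQ size
 *	p.cq_entries = 4096;			// must be >= the SQ entries
 *	int ring_fd = syscall(__NR_io_uring_setup, 128, &p);
 *	// on return, p.sq_entries/p.cq_entries hold the rounded-up sizes and
 *	// p.sq_off/p.cq_off give the offsets used when mmapping the rings
 */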
7765
7766static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
7767{
7768 struct io_uring_probe *p;
7769 size_t size;
7770 int i, ret;
7771
7772 size = struct_size(p, ops, nr_args);
7773 if (size == SIZE_MAX)
7774 return -EOVERFLOW;
7775 p = kzalloc(size, GFP_KERNEL);
7776 if (!p)
7777 return -ENOMEM;
7778
7779 ret = -EFAULT;
7780 if (copy_from_user(p, arg, size))
7781 goto out;
7782 ret = -EINVAL;
7783 if (memchr_inv(p, 0, size))
7784 goto out;
7785
7786 p->last_op = IORING_OP_LAST - 1;
7787 if (nr_args > IORING_OP_LAST)
7788 nr_args = IORING_OP_LAST;
7789
7790 for (i = 0; i < nr_args; i++) {
7791 p->ops[i].op = i;
7792 if (!io_op_defs[i].not_supported)
7793 p->ops[i].flags = IO_URING_OP_SUPPORTED;
7794 }
7795 p->ops_len = i;
7796
7797 ret = 0;
7798 if (copy_to_user(arg, p, size))
7799 ret = -EFAULT;
7800out:
7801 kfree(p);
7802 return ret;
7803}
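/*
 * Illustrative userspace sketch (not kernel code): querying opcode support
 * through IORING_REGISTER_PROBE, as handled by io_probe() above.
 *
 *	struct io_uring_probe *probe;
 *
 *	probe = calloc(1, sizeof(*probe) +
 *			  IORING_OP_LAST * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, IORING_OP_LAST);
 *	if (IORING_OP_OPENAT < probe->ops_len &&
 *	    (probe->ops[IORING_OP_OPENAT].flags & IO_URING_OP_SUPPORTED))
 *		;	// openat is supported on this kernel
 *	free(probe);
 */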
7804
7805static int io_register_personality(struct io_ring_ctx *ctx)
7806{
7807 const struct cred *creds = get_current_cred();
7808 int id;
7809
7810 id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
7811 USHRT_MAX, GFP_KERNEL);
7812 if (id < 0)
7813 put_cred(creds);
7814 return id;
7815}
7816
7817static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
7818{
7819 const struct cred *old_creds;
7820
7821 old_creds = idr_remove(&ctx->personality_idr, id);
7822 if (old_creds) {
7823 put_cred(old_creds);
7824 return 0;
7825 }
7826
7827 return -EINVAL;
7828}
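/*
 * Illustrative userspace sketch (not kernel code): the id returned by
 * IORING_REGISTER_PERSONALITY is later placed in sqe->personality so that
 * a request runs with the credentials that were current at register time.
 *
 *	int id = syscall(__NR_io_uring_register, ring_fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *	if (id > 0)
 *		sqe->personality = id;	// sqe filled in elsewhere
 */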
7829
7830static bool io_register_op_must_quiesce(int op)
7831{
7832 switch (op) {
7833 case IORING_UNREGISTER_FILES:
7834 case IORING_REGISTER_FILES_UPDATE:
7835 case IORING_REGISTER_PROBE:
7836 case IORING_REGISTER_PERSONALITY:
7837 case IORING_UNREGISTER_PERSONALITY:
7838 return false;
7839 default:
7840 return true;
7841 }
7842}
7843
7844static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
7845 void __user *arg, unsigned nr_args)
7846 __releases(ctx->uring_lock)
7847 __acquires(ctx->uring_lock)
7848{
7849 int ret;
7850
7851 /*
7852 * We're inside the ring mutex, if the ref is already dying, then
7853 * someone else killed the ctx or is already going through
7854 * io_uring_register().
7855 */
7856 if (percpu_ref_is_dying(&ctx->refs))
7857 return -ENXIO;
7858
071698e1 7859 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 7860 percpu_ref_kill(&ctx->refs);
b19062a5 7861
7862 /*
7863 * Drop uring mutex before waiting for references to exit. If
7864 * another thread is currently inside io_uring_enter() it might
7865 * need to grab the uring_lock to make progress. If we hold it
7866 * here across the drain wait, then we can deadlock. It's safe
7867 * to drop the mutex here, since no new references will come in
7868 * after we've killed the percpu ref.
7869 */
7870 mutex_unlock(&ctx->uring_lock);
c150368b 7871 ret = wait_for_completion_interruptible(&ctx->completions[0]);
05f3fb3c 7872 mutex_lock(&ctx->uring_lock);
7873 if (ret) {
7874 percpu_ref_resurrect(&ctx->refs);
7875 ret = -EINTR;
7876 goto out;
7877 }
05f3fb3c 7878 }
7879
7880 switch (opcode) {
7881 case IORING_REGISTER_BUFFERS:
7882 ret = io_sqe_buffer_register(ctx, arg, nr_args);
7883 break;
7884 case IORING_UNREGISTER_BUFFERS:
7885 ret = -EINVAL;
7886 if (arg || nr_args)
7887 break;
7888 ret = io_sqe_buffer_unregister(ctx);
7889 break;
7890 case IORING_REGISTER_FILES:
7891 ret = io_sqe_files_register(ctx, arg, nr_args);
7892 break;
7893 case IORING_UNREGISTER_FILES:
7894 ret = -EINVAL;
7895 if (arg || nr_args)
7896 break;
7897 ret = io_sqe_files_unregister(ctx);
7898 break;
7899 case IORING_REGISTER_FILES_UPDATE:
7900 ret = io_sqe_files_update(ctx, arg, nr_args);
7901 break;
9b402849 7902 case IORING_REGISTER_EVENTFD:
f2842ab5 7903 case IORING_REGISTER_EVENTFD_ASYNC:
7904 ret = -EINVAL;
7905 if (nr_args != 1)
7906 break;
7907 ret = io_eventfd_register(ctx, arg);
7908 if (ret)
7909 break;
7910 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
7911 ctx->eventfd_async = 1;
7912 else
7913 ctx->eventfd_async = 0;
7914 break;
7915 case IORING_UNREGISTER_EVENTFD:
7916 ret = -EINVAL;
7917 if (arg || nr_args)
7918 break;
7919 ret = io_eventfd_unregister(ctx);
7920 break;
7921 case IORING_REGISTER_PROBE:
7922 ret = -EINVAL;
7923 if (!arg || nr_args > 256)
7924 break;
7925 ret = io_probe(ctx, arg, nr_args);
7926 break;
7927 case IORING_REGISTER_PERSONALITY:
7928 ret = -EINVAL;
7929 if (arg || nr_args)
7930 break;
7931 ret = io_register_personality(ctx);
7932 break;
7933 case IORING_UNREGISTER_PERSONALITY:
7934 ret = -EINVAL;
7935 if (arg)
7936 break;
7937 ret = io_unregister_personality(ctx, nr_args);
7938 break;
7939 default:
7940 ret = -EINVAL;
7941 break;
7942 }
7943
071698e1 7944 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 7945 /* bring the ctx back to life */
05f3fb3c 7946 percpu_ref_reinit(&ctx->refs);
7947out:
7948 reinit_completion(&ctx->completions[0]);
05f3fb3c 7949 }
7950 return ret;
7951}
7952
7953SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
7954 void __user *, arg, unsigned int, nr_args)
7955{
7956 struct io_ring_ctx *ctx;
7957 long ret = -EBADF;
7958 struct fd f;
7959
7960 f = fdget(fd);
7961 if (!f.file)
7962 return -EBADF;
7963
7964 ret = -EOPNOTSUPP;
7965 if (f.file->f_op != &io_uring_fops)
7966 goto out_fput;
7967
7968 ctx = f.file->private_data;
7969
7970 mutex_lock(&ctx->uring_lock);
7971 ret = __io_uring_register(ctx, opcode, arg, nr_args);
7972 mutex_unlock(&ctx->uring_lock);
7973 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
7974 ctx->cq_ev_fd != NULL, ret);
7975out_fput:
7976 fdput(f);
7977 return ret;
7978}
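/*
 * Illustrative userspace sketch (not kernel code): registering an eventfd
 * for completion notifications through this syscall.
 *
 *	#include <sys/eventfd.h>
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	// nr_args must be 1 and arg points at the eventfd descriptor
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_EVENTFD,
 *		&efd, 1);
 */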
7979
7980static int __init io_uring_init(void)
7981{
7982#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
7983 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
7984 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
7985} while (0)
7986
7987#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
7988 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
7989 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
7990 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
7991 BUILD_BUG_SQE_ELEM(1, __u8, flags);
7992 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
7993 BUILD_BUG_SQE_ELEM(4, __s32, fd);
7994 BUILD_BUG_SQE_ELEM(8, __u64, off);
7995 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
7996 BUILD_BUG_SQE_ELEM(16, __u64, addr);
7d67af2c 7997 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
7998 BUILD_BUG_SQE_ELEM(24, __u32, len);
7999 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
8000 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
8001 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
8002 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
8003 BUILD_BUG_SQE_ELEM(28, __u16, poll_events);
8004 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
8005 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
8006 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
8007 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
8008 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
8009 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
8010 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
8011 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
7d67af2c 8012 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
8013 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
8014 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
8015 BUILD_BUG_SQE_ELEM(42, __u16, personality);
7d67af2c 8016 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
d7f62e82 8017
d3656344 8018 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
84557871 8019 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
8020 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
8021 return 0;
8022};
8023__initcall(io_uring_init);