// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>
#include <linux/indirect_call_wrapper.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "poll.h"
#include "rw.h"

static void io_complete_rw(struct kiocb *kiocb, long res);
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res);

struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb                    kiocb;
        u64                             addr;
        u32                             len;
        rwf_t                           flags;
};

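/*
 * Check whether this request can be issued without blocking: either the
 * file itself advertises FMODE_NOWAIT (REQ_F_SUPPORT_NOWAIT), or the file
 * is pollable and currently signals readiness for the given poll mask.
 */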
static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
{
        /* If FMODE_NOWAIT is set for a file, we're golden */
        if (req->flags & REQ_F_SUPPORT_NOWAIT)
                return true;
        /* No FMODE_NOWAIT, if we can poll, check the status */
        if (io_file_can_poll(req)) {
                struct poll_table_struct pt = { ._key = mask };

                return vfs_poll(req->file, &pt) & mask;
        }
        /* No FMODE_NOWAIT support, and file isn't pollable. Tough luck. */
        return false;
}

static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
        struct compat_iovec __user *uiov = u64_to_user_ptr(rw->addr);
        struct compat_iovec iov;

        if (copy_from_user(&iov, uiov, sizeof(iov)))
                return -EFAULT;
        rw->len = iov.iov_len;
        return 0;
}

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
        struct iovec __user *uiov;
        struct iovec iov;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->len != 1)
                return -EINVAL;

        if (io_is_compat(req->ctx))
                return io_iov_compat_buffer_select_prep(rw);

        uiov = u64_to_user_ptr(rw->addr);
        if (copy_from_user(&iov, uiov, sizeof(*uiov)))
                return -EFAULT;
        rw->len = iov.iov_len;
        return 0;
}

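/*
 * Import a user-supplied iovec array into io->iter. Start from a previously
 * cached iovec allocation if the request has one, otherwise from the single
 * inline fast_iov; __import_iovec() may allocate a bigger array, which is
 * then stashed on the request for cleanup/recycling.
 */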
static int io_import_vec(int ddir, struct io_kiocb *req,
                         struct io_async_rw *io,
                         const struct iovec __user *uvec,
                         size_t uvec_segs)
{
        int ret, nr_segs;
        struct iovec *iov;

        if (io->vec.iovec) {
                nr_segs = io->vec.nr;
                iov = io->vec.iovec;
        } else {
                nr_segs = 1;
                iov = &io->fast_iov;
        }

        ret = __import_iovec(ddir, uvec, uvec_segs, nr_segs, &iov, &io->iter,
                             io_is_compat(req->ctx));
        if (unlikely(ret < 0))
                return ret;
        if (iov) {
                req->flags |= REQ_F_NEED_CLEANUP;
                io_vec_reset_iovec(&io->vec, iov, io->iter.nr_segs);
        }
        return 0;
}

static int __io_import_rw_buffer(int ddir, struct io_kiocb *req,
                                 struct io_async_rw *io,
                                 unsigned int issue_flags)
{
        const struct io_issue_def *def = &io_issue_defs[req->opcode];
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        void __user *buf = u64_to_user_ptr(rw->addr);
        size_t sqe_len = rw->len;

        if (def->vectored && !(req->flags & REQ_F_BUFFER_SELECT))
                return io_import_vec(ddir, req, io, buf, sqe_len);

        if (io_do_buffer_select(req)) {
                buf = io_buffer_select(req, &sqe_len, io->buf_group, issue_flags);
                if (!buf)
                        return -ENOBUFS;
                rw->addr = (unsigned long) buf;
                rw->len = sqe_len;
        }
        return import_ubuf(ddir, buf, sqe_len, &io->iter);
}

static inline int io_import_rw_buffer(int rw, struct io_kiocb *req,
                                      struct io_async_rw *io,
                                      unsigned int issue_flags)
{
        int ret;

        ret = __io_import_rw_buffer(rw, req, io, issue_flags);
        if (unlikely(ret < 0))
                return ret;

        iov_iter_save_state(&io->iter, &io->iter_state);
        return 0;
}

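/*
 * Try to return the request's struct io_async_rw to the per-ring rw_cache.
 * This is only done when the ring lock is held (not IO_URING_F_UNLOCKED);
 * iovec allocations above IO_VEC_CACHE_SOFT_CAP are freed instead of cached.
 */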
static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_async_rw *rw = req->async_data;

        if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
                return;

        io_alloc_cache_vec_kasan(&rw->vec);
        if (rw->vec.nr > IO_VEC_CACHE_SOFT_CAP)
                io_vec_free(&rw->vec);

        if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
                req->async_data = NULL;
                req->flags &= ~REQ_F_ASYNC_DATA;
        }
}

static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
        /*
         * Disable quick recycling for anything that's gone through io-wq.
         * In theory, this should be fine to cleanup. However, some read or
         * write iter handling touches the iovec AFTER having called into the
         * handler, eg to reexpand or revert. This means we can have:
         *
         * task                 io-wq
         *   issue
         *     punt to io-wq
         *                      issue
         *                        blkdev_write_iter()
         *                          ->ki_complete()
         *                            io_complete_rw()
         *                              queue tw complete
         *  run tw
         *    req_rw_cleanup
         *                      iov_iter_count() <- look at iov_iter again
         *
         * which can lead to a UAF. This is only possible for io-wq offload
         * as the cleanup can run in parallel. As io-wq is not the fast path,
         * just leave cleanup to the end.
         *
         * This is really a bug in the core code that does this, any issue
         * path should assume that a successful (or -EIOCBQUEUED) return can
         * mean that the underlying data can be gone at any time. But that
         * should be fixed separately, and then this check could be killed.
         */
        if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
                req->flags &= ~REQ_F_NEED_CLEANUP;
                io_rw_recycle(req, issue_flags);
        }
}

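/* Attach async read/write state to the request, preferably from rw_cache. */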
static int io_rw_alloc_async(struct io_kiocb *req)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_async_rw *rw;

        rw = io_uring_alloc_async_data(&ctx->rw_cache, req);
        if (!rw)
                return -ENOMEM;
        if (rw->vec.iovec)
                req->flags |= REQ_F_NEED_CLEANUP;
        rw->bytes_done = 0;
        return 0;
}

static inline void io_meta_save_state(struct io_async_rw *io)
{
        io->meta_state.seed = io->meta.seed;
        iov_iter_save_state(&io->meta.iter, &io->meta_state.iter_meta);
}

static inline void io_meta_restore(struct io_async_rw *io, struct kiocb *kiocb)
{
        if (kiocb->ki_flags & IOCB_HAS_METADATA) {
                io->meta.seed = io->meta_state.seed;
                iov_iter_restore(&io->meta.iter, &io->meta_state.iter_meta);
        }
}

static int io_prep_rw_pi(struct io_kiocb *req, struct io_rw *rw, int ddir,
                         u64 attr_ptr, u64 attr_type_mask)
{
        struct io_uring_attr_pi pi_attr;
        struct io_async_rw *io;
        int ret;

        if (copy_from_user(&pi_attr, u64_to_user_ptr(attr_ptr),
            sizeof(pi_attr)))
                return -EFAULT;

        if (pi_attr.rsvd)
                return -EINVAL;

        io = req->async_data;
        io->meta.flags = pi_attr.flags;
        io->meta.app_tag = pi_attr.app_tag;
        io->meta.seed = pi_attr.seed;
        ret = import_ubuf(ddir, u64_to_user_ptr(pi_attr.addr),
                          pi_attr.len, &io->meta.iter);
        if (unlikely(ret < 0))
                return ret;
        req->flags |= REQ_F_HAS_METADATA;
        io_meta_save_state(io);
        return ret;
}

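/*
 * Common prep for all read/write variants: allocate the async state and pull
 * offset, buffer index/group, ioprio, rw_flags and the optional write stream
 * out of the SQE. The completion handler is chosen based on IOPOLL, and an
 * optional PI attribute is imported via io_prep_rw_pi().
 */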
static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                        int ddir)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_async_rw *io;
        unsigned ioprio;
        u64 attr_type_mask;
        int ret;

        if (io_rw_alloc_async(req))
                return -ENOMEM;
        io = req->async_data;

        rw->kiocb.ki_pos = READ_ONCE(sqe->off);
        /* used for fixed read/write too - just read unconditionally */
        req->buf_index = READ_ONCE(sqe->buf_index);
        io->buf_group = req->buf_index;

        ioprio = READ_ONCE(sqe->ioprio);
        if (ioprio) {
                ret = ioprio_check_cap(ioprio);
                if (ret)
                        return ret;

                rw->kiocb.ki_ioprio = ioprio;
        } else {
                rw->kiocb.ki_ioprio = get_current_ioprio();
        }
        rw->kiocb.dio_complete = NULL;
        rw->kiocb.ki_flags = 0;
        rw->kiocb.ki_write_stream = READ_ONCE(sqe->write_stream);

        if (req->ctx->flags & IORING_SETUP_IOPOLL)
                rw->kiocb.ki_complete = io_complete_rw_iopoll;
        else
                rw->kiocb.ki_complete = io_complete_rw;

        rw->addr = READ_ONCE(sqe->addr);
        rw->len = READ_ONCE(sqe->len);
        rw->flags = READ_ONCE(sqe->rw_flags);

        attr_type_mask = READ_ONCE(sqe->attr_type_mask);
        if (attr_type_mask) {
                u64 attr_ptr;

                /* only PI attribute is supported currently */
                if (attr_type_mask != IORING_RW_ATTR_FLAG_PI)
                        return -EINVAL;

                attr_ptr = READ_ONCE(sqe->attr_ptr);
                return io_prep_rw_pi(req, rw, ddir, attr_ptr, attr_type_mask);
        }
        return 0;
}

static int io_rw_do_import(struct io_kiocb *req, int ddir)
{
        if (io_do_buffer_select(req))
                return 0;

        return io_import_rw_buffer(ddir, req, req->async_data, 0);
}

static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                      int ddir)
{
        int ret;

        ret = __io_prep_rw(req, sqe, ddir);
        if (unlikely(ret))
                return ret;

        return io_rw_do_import(req, ddir);
}

int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rw(req, sqe, ITER_DEST);
}

int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rw(req, sqe, ITER_SOURCE);
}

static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                       int ddir)
{
        int ret;

        ret = io_prep_rw(req, sqe, ddir);
        if (unlikely(ret))
                return ret;
        if (!(req->flags & REQ_F_BUFFER_SELECT))
                return 0;

        /*
         * Have to do this validation here, as by the time this is in
         * io_read(), rw->len might have changed due to buffer selection.
         */
        return io_iov_buffer_select_prep(req);
}

int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rwv(req, sqe, ITER_DEST);
}

int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rwv(req, sqe, ITER_SOURCE);
}

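/*
 * For fixed-buffer reads and writes, the registered buffer is imported at
 * issue time rather than at prep time. Skip the import when re-issuing a
 * request that has already transferred some bytes.
 */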
static int io_init_rw_fixed(struct io_kiocb *req, unsigned int issue_flags,
                            int ddir)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_async_rw *io = req->async_data;
        int ret;

        if (io->bytes_done)
                return 0;

        ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir,
                                issue_flags);
        iov_iter_save_state(&io->iter, &io->iter_state);
        return ret;
}

int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return __io_prep_rw(req, sqe, ITER_DEST);
}

int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return __io_prep_rw(req, sqe, ITER_SOURCE);
}

static int io_rw_import_reg_vec(struct io_kiocb *req,
                                struct io_async_rw *io,
                                int ddir, unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned uvec_segs = rw->len;
        int ret;

        ret = io_import_reg_vec(ddir, &io->iter, req, &io->vec,
                                uvec_segs, issue_flags);
        if (unlikely(ret))
                return ret;
        iov_iter_save_state(&io->iter, &io->iter_state);
        req->flags &= ~REQ_F_IMPORT_BUFFER;
        return 0;
}

static int io_rw_prep_reg_vec(struct io_kiocb *req)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_async_rw *io = req->async_data;
        const struct iovec __user *uvec;

        uvec = u64_to_user_ptr(rw->addr);
        return io_prep_reg_iovec(req, &io->vec, uvec, rw->len);
}

int io_prep_readv_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        int ret;

        ret = __io_prep_rw(req, sqe, ITER_DEST);
        if (unlikely(ret))
                return ret;
        return io_rw_prep_reg_vec(req);
}

int io_prep_writev_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        int ret;

        ret = __io_prep_rw(req, sqe, ITER_SOURCE);
        if (unlikely(ret))
                return ret;
        return io_rw_prep_reg_vec(req);
}

/*
 * Multishot read is prepared just like a normal read/write request; the only
 * difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        int ret;

        /* must be used with provided buffers */
        if (!(req->flags & REQ_F_BUFFER_SELECT))
                return -EINVAL;

        ret = __io_prep_rw(req, sqe, ITER_DEST);
        if (unlikely(ret))
                return ret;

        if (rw->addr || rw->len)
                return -EINVAL;

        req->flags |= REQ_F_APOLL_MULTISHOT;
        return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
        lockdep_assert_held(&req->ctx->uring_lock);
        io_rw_recycle(req, 0);
}

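/*
 * Resolve the file position for this request: an explicit offset from the
 * SQE if one was given (ki_pos != -1), the file's current position for
 * non-stream files (written back at completion via REQ_F_CUR_POS), or NULL
 * for stream files, which have no position.
 */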
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->kiocb.ki_pos != -1)
                return &rw->kiocb.ki_pos;

        if (!(req->file->f_mode & FMODE_STREAM)) {
                req->flags |= REQ_F_CUR_POS;
                rw->kiocb.ki_pos = req->file->f_pos;
                return &rw->kiocb.ki_pos;
        }

        rw->kiocb.ki_pos = 0;
        return NULL;
}

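/*
 * Decide whether an -EAGAIN from a regular file or block device should be
 * reissued from io-wq instead of failing the request. If so, restore the
 * iterator and PI metadata state so the retry starts from a clean slate.
 */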
static bool io_rw_should_reissue(struct io_kiocb *req)
{
#ifdef CONFIG_BLOCK
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        umode_t mode = file_inode(req->file)->i_mode;
        struct io_async_rw *io = req->async_data;
        struct io_ring_ctx *ctx = req->ctx;

        if (!S_ISBLK(mode) && !S_ISREG(mode))
                return false;
        if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
            !(ctx->flags & IORING_SETUP_IOPOLL)))
                return false;
        /*
         * If ref is dying, we might be running poll reap from the exit work.
         * Don't attempt to reissue from that path, just let it fail with
         * -EAGAIN.
         */
        if (percpu_ref_is_dying(&ctx->refs))
                return false;

        io_meta_restore(io, &rw->kiocb);
        iov_iter_restore(&io->iter, &io->iter_state);
        return true;
#else
        return false;
#endif
}

static void io_req_end_write(struct io_kiocb *req)
{
        if (req->flags & REQ_F_ISREG) {
                struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

                kiocb_end_write(&rw->kiocb);
        }
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->kiocb.ki_flags & IOCB_WRITE) {
                io_req_end_write(req);
                fsnotify_modify(req->file);
        } else {
                fsnotify_access(req->file);
        }
}

static void __io_complete_rw_common(struct io_kiocb *req, long res)
{
        if (res == req->cqe.res)
                return;
        if (res == -EAGAIN && io_rw_should_reissue(req)) {
                req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
        } else {
                req_set_fail(req);
                req->cqe.res = res;
        }
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
        struct io_async_rw *io = req->async_data;

        /* add previously done IO, if any */
        if (req_has_async_data(req) && io->bytes_done > 0) {
                if (res < 0)
                        res = io->bytes_done;
                else
                        res += io->bytes_done;
        }
        return res;
}

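/*
 * task_work completion handler for reads and writes: finish any deferred
 * dio completion, do fsnotify and write accounting, fold in a selected
 * buffer, clean up the async state and post the CQE.
 */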
void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;

        if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
                long res = kiocb->dio_complete(rw->kiocb.private);

                io_req_set_res(req, io_fixup_rw_res(req, res), 0);
        }

        io_req_io_end(req);

        if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
                req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0);

        io_req_rw_cleanup(req, 0);
        io_req_task_complete(req, tw);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
        struct io_kiocb *req = cmd_to_io_kiocb(rw);

        if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
                __io_complete_rw_common(req, res);
                io_req_set_res(req, io_fixup_rw_res(req, res), 0);
        }
        req->io_task_work.func = io_req_rw_complete;
        __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
        struct io_kiocb *req = cmd_to_io_kiocb(rw);

        if (kiocb->ki_flags & IOCB_WRITE)
                io_req_end_write(req);
        if (unlikely(res != req->cqe.res)) {
                if (res == -EAGAIN && io_rw_should_reissue(req))
                        req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
                else
                        req->cqe.res = res;
        }

        /* order with io_iopoll_complete() checking ->iopoll_completed */
        smp_store_release(&req->iopoll_completed, 1);
}

static inline void io_rw_done(struct io_kiocb *req, ssize_t ret)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        /* IO was queued async, completion will happen later */
        if (ret == -EIOCBQUEUED)
                return;

        /* transform internal restart error codes */
        if (unlikely(ret < 0)) {
                switch (ret) {
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                case -ERESTARTNOHAND:
                case -ERESTART_RESTARTBLOCK:
                        /*
                         * We can't just restart the syscall, since previously
                         * submitted sqes may already be in progress. Just fail
                         * this IO with EINTR.
                         */
                        ret = -EINTR;
                        break;
                }
        }

        if (req->ctx->flags & IORING_SETUP_IOPOLL)
                io_complete_rw_iopoll(&rw->kiocb, ret);
        else
                io_complete_rw(&rw->kiocb, ret);
}

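/*
 * Finish a request whose issue returned inline. Non-IOPOLL requests that
 * completed successfully are finalized right here from the submission path;
 * everything else is routed through io_rw_done() and completes via the
 * normal ->ki_complete() machinery.
 */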
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
                      unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned final_ret = io_fixup_rw_res(req, ret);

        if (ret >= 0 && req->flags & REQ_F_CUR_POS)
                req->file->f_pos = rw->kiocb.ki_pos;
        if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
                __io_complete_rw_common(req, ret);
                /*
                 * Safe to call io_end from here as we're inline
                 * from the submission path.
                 */
                io_req_io_end(req);
                io_req_set_res(req, final_ret, io_put_kbuf(req, ret, issue_flags));
                io_req_rw_cleanup(req, issue_flags);
                return IOU_COMPLETE;
        } else {
                io_rw_done(req, ret);
        }

        return IOU_ISSUE_SKIP_COMPLETE;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
        return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
        struct io_kiocb *req = cmd_to_io_kiocb(rw);
        struct kiocb *kiocb = &rw->kiocb;
        struct file *file = kiocb->ki_filp;
        ssize_t ret = 0;
        loff_t *ppos;

        /*
         * Don't support polled IO through this interface, and we can't
         * support non-blocking either. For the latter, this just causes
         * the kiocb to be handled from an async context.
         */
        if (kiocb->ki_flags & IOCB_HIPRI)
                return -EOPNOTSUPP;
        if ((kiocb->ki_flags & IOCB_NOWAIT) &&
            !(kiocb->ki_filp->f_flags & O_NONBLOCK))
                return -EAGAIN;
        if ((req->flags & REQ_F_BUF_NODE) && req->buf_node->buf->is_kbuf)
                return -EFAULT;

        ppos = io_kiocb_ppos(kiocb);

        while (iov_iter_count(iter)) {
                void __user *addr;
                size_t len;
                ssize_t nr;

                if (iter_is_ubuf(iter)) {
                        addr = iter->ubuf + iter->iov_offset;
                        len = iov_iter_count(iter);
                } else if (!iov_iter_is_bvec(iter)) {
                        addr = iter_iov_addr(iter);
                        len = iter_iov_len(iter);
                } else {
                        addr = u64_to_user_ptr(rw->addr);
                        len = rw->len;
                }

                if (ddir == READ)
                        nr = file->f_op->read(file, addr, len, ppos);
                else
                        nr = file->f_op->write(file, addr, len, ppos);

                if (nr < 0) {
                        if (!ret)
                                ret = nr;
                        break;
                }
                ret += nr;
                if (!iov_iter_is_bvec(iter)) {
                        iov_iter_advance(iter, nr);
                } else {
                        rw->addr += nr;
                        rw->len -= nr;
                        if (!rw->len)
                                break;
                }
                if (nr != len)
                        break;
        }

        return ret;
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb that had our waitqueue
 * armed. This gets called when the page is unlocked, and we generally expect
 * that to happen when the page IO is completed and the page is now uptodate.
 * This will queue a task_work based retry of the operation, attempting to
 * copy the data again. If the latter fails because the page was NOT uptodate,
 * then we will do a thread based blocking retry of the operation. That's the
 * unexpected slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
                             int sync, void *arg)
{
        struct wait_page_queue *wpq;
        struct io_kiocb *req = wait->private;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct wait_page_key *key = arg;

        wpq = container_of(wait, struct wait_page_queue, wait);

        if (!wake_page_match(wpq, key))
                return 0;

        rw->kiocb.ki_flags &= ~IOCB_WAITQ;
        list_del_init(&wait->entry);
        io_req_task_queue(req);
        return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
        struct io_async_rw *io = req->async_data;
        struct wait_page_queue *wait = &io->wpq;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;

        /*
         * Never retry for NOWAIT or a request with metadata, we just complete
         * with -EAGAIN.
         */
        if (req->flags & (REQ_F_NOWAIT | REQ_F_HAS_METADATA))
                return false;

        /* Only for buffered IO */
        if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
                return false;

        /*
         * just use poll if we can, and don't attempt if the fs doesn't
         * support callback based unlocks
         */
        if (io_file_can_poll(req) ||
            !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC))
                return false;

        wait->wait.func = io_async_buf_func;
        wait->wait.private = req;
        wait->wait.flags = 0;
        INIT_LIST_HEAD(&wait->wait.entry);
        kiocb->ki_flags |= IOCB_WAITQ;
        kiocb->ki_flags &= ~IOCB_NOWAIT;
        kiocb->ki_waitq = wait;
        return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
        struct file *file = rw->kiocb.ki_filp;

        if (likely(file->f_op->read_iter))
                return file->f_op->read_iter(&rw->kiocb, iter);
        else if (file->f_op->read)
                return loop_rw_iter(READ, rw, iter);
        else
                return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
        return req->flags & REQ_F_ISREG ||
                S_ISBLK(file_inode(req->file)->i_mode);
}

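/*
 * Per-issue kiocb and file setup: validate the file mode, apply the RWF_*
 * flags, work out whether the request must not block, and configure the
 * IOPOLL / hybrid poll and PI metadata bits.
 */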
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;
        struct io_ring_ctx *ctx = req->ctx;
        struct file *file = req->file;
        int ret;

        if (unlikely(!(file->f_mode & mode)))
                return -EBADF;

        if (!(req->flags & REQ_F_FIXED_FILE))
                req->flags |= io_file_get_flags(file);

        kiocb->ki_flags = file->f_iocb_flags;
        ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
        if (unlikely(ret))
                return ret;
        kiocb->ki_flags |= IOCB_ALLOC_CACHE;

        /*
         * If the file is marked O_NONBLOCK, still allow retry for it if it
         * supports async. Otherwise it's impossible to use O_NONBLOCK files
         * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
         */
        if (kiocb->ki_flags & IOCB_NOWAIT ||
            ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
                req->flags |= REQ_F_NOWAIT;

        if (ctx->flags & IORING_SETUP_IOPOLL) {
                if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
                        return -EOPNOTSUPP;
                kiocb->private = NULL;
                kiocb->ki_flags |= IOCB_HIPRI;
                req->iopoll_completed = 0;
                if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
                        /* make sure every req only blocks once */
                        req->flags &= ~REQ_F_IOPOLL_STATE;
                        req->iopoll_start = ktime_get_ns();
                }
        } else {
                if (kiocb->ki_flags & IOCB_HIPRI)
                        return -EINVAL;
        }

        if (req->flags & REQ_F_HAS_METADATA) {
                struct io_async_rw *io = req->async_data;

                /*
                 * We have a union of meta fields with wpq used for buffered-io
                 * in io_async_rw, so fail it here.
                 */
                if (!(req->file->f_flags & O_DIRECT))
                        return -EOPNOTSUPP;
                kiocb->ki_flags |= IOCB_HAS_METADATA;
                kiocb->private = &io->meta;
        }

        return 0;
}

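/*
 * Core read path shared by io_read() and io_read_mshot(). Returns the number
 * of bytes read, a negative error such as -EAGAIN when the request should be
 * punted, or IOU_ISSUE_SKIP_COMPLETE when the IO was queued for async
 * completion.
 */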
static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_async_rw *io = req->async_data;
        struct kiocb *kiocb = &rw->kiocb;
        ssize_t ret;
        loff_t *ppos;

        if (req->flags & REQ_F_IMPORT_BUFFER) {
                ret = io_rw_import_reg_vec(req, io, ITER_DEST, issue_flags);
                if (unlikely(ret))
                        return ret;
        } else if (io_do_buffer_select(req)) {
                ret = io_import_rw_buffer(ITER_DEST, req, io, issue_flags);
                if (unlikely(ret < 0))
                        return ret;
        }
        ret = io_rw_init_file(req, FMODE_READ, READ);
        if (unlikely(ret))
                return ret;
        req->cqe.res = iov_iter_count(&io->iter);

        if (force_nonblock) {
                /* If the file doesn't support async, just async punt */
                if (unlikely(!io_file_supports_nowait(req, EPOLLIN)))
                        return -EAGAIN;
                kiocb->ki_flags |= IOCB_NOWAIT;
        } else {
                /* Ensure we clear previously set non-block flag */
                kiocb->ki_flags &= ~IOCB_NOWAIT;
        }

        ppos = io_kiocb_update_pos(req);

        ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
        if (unlikely(ret))
                return ret;

        ret = io_iter_do_read(rw, &io->iter);

        /*
         * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
         * issue, even though they should be returning -EAGAIN. To be safe,
         * retry from blocking context for either.
         */
        if (ret == -EOPNOTSUPP && force_nonblock)
                ret = -EAGAIN;

        if (ret == -EAGAIN) {
                /* If we can poll, just do that. */
                if (io_file_can_poll(req))
                        return -EAGAIN;
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto done;
                /* no retry on NONBLOCK nor RWF_NOWAIT */
                if (req->flags & REQ_F_NOWAIT)
                        goto done;
                ret = 0;
        } else if (ret == -EIOCBQUEUED) {
                return IOU_ISSUE_SKIP_COMPLETE;
        } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) ||
                   (issue_flags & IO_URING_F_MULTISHOT)) {
                /* read all, failed, already did sync or don't want to retry */
                goto done;
        }

        /*
         * Don't depend on the iter state matching what was consumed, or being
         * untouched in case of error. Restore it and we'll advance it
         * manually if we need to.
         */
        iov_iter_restore(&io->iter, &io->iter_state);
        io_meta_restore(io, kiocb);

        do {
                /*
                 * We end up here because of a partial read, either from
                 * above or inside this loop. Advance the iter by the bytes
                 * that were consumed.
                 */
                iov_iter_advance(&io->iter, ret);
                if (!iov_iter_count(&io->iter))
                        break;
                io->bytes_done += ret;
                iov_iter_save_state(&io->iter, &io->iter_state);

                /* if we can retry, do so with the callbacks armed */
                if (!io_rw_should_retry(req)) {
                        kiocb->ki_flags &= ~IOCB_WAITQ;
                        return -EAGAIN;
                }

                req->cqe.res = iov_iter_count(&io->iter);
                /*
                 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
                 * we get -EIOCBQUEUED, then we'll get a notification when the
                 * desired page gets unlocked. We can also get a partial read
                 * here, and if we do, then just retry at the new offset.
                 */
                ret = io_iter_do_read(rw, &io->iter);
                if (ret == -EIOCBQUEUED)
                        return IOU_ISSUE_SKIP_COMPLETE;
                /* we got some bytes, but not all. retry. */
                kiocb->ki_flags &= ~IOCB_WAITQ;
                iov_iter_restore(&io->iter, &io->iter_state);
        } while (ret > 0);
done:
        /* it's faster to check here than delegate to kfree */
        return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
        int ret;

        ret = __io_read(req, issue_flags);
        if (ret >= 0)
                return kiocb_done(req, ret, issue_flags);

        return ret;
}

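/*
 * Issue a multishot read: every successful read posts a CQE flagged with
 * IORING_CQE_F_MORE and leaves the request armed for the next poll wakeup,
 * until an error or CQ overflow terminates it. From userspace this is
 * typically armed with a provided-buffer group, roughly (liburing-style
 * sketch, exact helper name/signature may differ):
 *
 *	io_uring_prep_read_multishot(sqe, fd, 0, 0, buf_group);
 */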
int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned int cflags = 0;
        int ret;

        /*
         * Multishot MUST be used on a pollable file
         */
        if (!io_file_can_poll(req))
                return -EBADFD;

        /* make it sync, multishot doesn't support async execution */
        rw->kiocb.ki_complete = NULL;
        ret = __io_read(req, issue_flags);

        /*
         * If we get -EAGAIN, recycle our buffer and just let normal poll
         * handling arm it.
         */
        if (ret == -EAGAIN) {
                /*
                 * Reset rw->len to 0 again to avoid clamping future mshot
                 * reads, in case the buffer size varies.
                 */
                if (io_kbuf_recycle(req, issue_flags))
                        rw->len = 0;
                return IOU_RETRY;
        } else if (ret <= 0) {
                io_kbuf_recycle(req, issue_flags);
                if (ret < 0)
                        req_set_fail(req);
        } else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                cflags = io_put_kbuf(req, ret, issue_flags);
        } else {
                /*
                 * Any successful return value will keep the multishot read
                 * armed, if it's still set. Put our buffer and post a CQE. If
                 * we fail to post a CQE, or multishot is no longer set, then
                 * jump to the termination path. This request is then done.
                 */
                cflags = io_put_kbuf(req, ret, issue_flags);
                rw->len = 0; /* similarly to above, reset len to 0 */

                if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
                        if (issue_flags & IO_URING_F_MULTISHOT)
                                /*
                                 * Force retry, as we might have more data to
                                 * be read and otherwise it won't get retried
                                 * until (if ever) another poll is triggered.
                                 */
                                io_poll_multishot_retry(req);

                        return IOU_RETRY;
                }
        }

        /*
         * Either an error, or we've hit overflow posting the CQE. For any
         * multishot request, hitting overflow will terminate it.
         */
        io_req_set_res(req, ret, cflags);
        io_req_rw_cleanup(req, issue_flags);
        return IOU_COMPLETE;
}

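/*
 * Take write/freeze protection before a write to a regular file. For
 * IOCB_NOWAIT only a trylock is acceptable: if the superblock is frozen,
 * report failure so the write is retried from a context that may block.
 */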
static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb)
{
        struct inode *inode;
        bool ret;

        if (!(req->flags & REQ_F_ISREG))
                return true;
        if (!(kiocb->ki_flags & IOCB_NOWAIT)) {
                kiocb_start_write(kiocb);
                return true;
        }

        inode = file_inode(kiocb->ki_filp);
        ret = sb_start_write_trylock(inode->i_sb);
        if (ret)
                __sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
        return ret;
}

int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_async_rw *io = req->async_data;
        struct kiocb *kiocb = &rw->kiocb;
        ssize_t ret, ret2;
        loff_t *ppos;

        if (req->flags & REQ_F_IMPORT_BUFFER) {
                ret = io_rw_import_reg_vec(req, io, ITER_SOURCE, issue_flags);
                if (unlikely(ret))
                        return ret;
        }

        ret = io_rw_init_file(req, FMODE_WRITE, WRITE);
        if (unlikely(ret))
                return ret;
        req->cqe.res = iov_iter_count(&io->iter);

        if (force_nonblock) {
                /* If the file doesn't support async, just async punt */
                if (unlikely(!io_file_supports_nowait(req, EPOLLOUT)))
                        goto ret_eagain;

                /* Check if we can support NOWAIT. */
                if (!(kiocb->ki_flags & IOCB_DIRECT) &&
                    !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) &&
                    (req->flags & REQ_F_ISREG))
                        goto ret_eagain;

                kiocb->ki_flags |= IOCB_NOWAIT;
        } else {
                /* Ensure we clear previously set non-block flag */
                kiocb->ki_flags &= ~IOCB_NOWAIT;
        }

        ppos = io_kiocb_update_pos(req);

        ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
        if (unlikely(ret))
                return ret;

        if (unlikely(!io_kiocb_start_write(req, kiocb)))
                return -EAGAIN;
        kiocb->ki_flags |= IOCB_WRITE;

        if (likely(req->file->f_op->write_iter))
                ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
        else if (req->file->f_op->write)
                ret2 = loop_rw_iter(WRITE, rw, &io->iter);
        else
                ret2 = -EINVAL;

        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
         * retry them without IOCB_NOWAIT.
         */
        if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
                ret2 = -EAGAIN;
        /* no retry on NONBLOCK nor RWF_NOWAIT */
        if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
                goto done;
        if (!force_nonblock || ret2 != -EAGAIN) {
                /* IOPOLL retry should happen for io-wq threads */
                if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto ret_eagain;

                if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
                        trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
                                                req->cqe.res, ret2);

                        /* This is a partial write. The file pos has already been
                         * updated, setup the async struct to complete the request
                         * in the worker. Also update bytes_done to account for
                         * the bytes already written.
                         */
                        iov_iter_save_state(&io->iter, &io->iter_state);
                        io->bytes_done += ret2;

                        if (kiocb->ki_flags & IOCB_WRITE)
                                io_req_end_write(req);
                        return -EAGAIN;
                }
done:
                return kiocb_done(req, ret2, issue_flags);
        } else {
ret_eagain:
                iov_iter_restore(&io->iter, &io->iter_state);
                io_meta_restore(io, kiocb);
                if (kiocb->ki_flags & IOCB_WRITE)
                        io_req_end_write(req);
                return -EAGAIN;
        }
}

int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
        int ret;

        ret = io_init_rw_fixed(req, issue_flags, ITER_DEST);
        if (unlikely(ret))
                return ret;

        return io_read(req, issue_flags);
}

int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
        int ret;

        ret = io_init_rw_fixed(req, issue_flags, ITER_SOURCE);
        if (unlikely(ret))
                return ret;

        return io_write(req, issue_flags);
}

void io_rw_fail(struct io_kiocb *req)
{
        int res;

        res = io_fixup_rw_res(req, req->cqe.res);
        io_req_set_res(req, res, req->cqe.flags);
}

static int io_uring_classic_poll(struct io_kiocb *req, struct io_comp_batch *iob,
                                 unsigned int poll_flags)
{
        struct file *file = req->file;

        if (req->opcode == IORING_OP_URING_CMD) {
                struct io_uring_cmd *ioucmd;

                ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
                return file->f_op->uring_cmd_iopoll(ioucmd, iob, poll_flags);
        } else {
                struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

                return file->f_op->iopoll(&rw->kiocb, iob, poll_flags);
        }
}

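/*
 * For hybrid IOPOLL, sleep for roughly half of the previously observed
 * completion time before starting to poll, trading a bit of latency for
 * less CPU spent busy-polling. Each request only sleeps once, tracked via
 * REQ_F_IOPOLL_STATE.
 */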
static u64 io_hybrid_iopoll_delay(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
        struct hrtimer_sleeper timer;
        enum hrtimer_mode mode;
        ktime_t kt;
        u64 sleep_time;

        if (req->flags & REQ_F_IOPOLL_STATE)
                return 0;

        if (ctx->hybrid_poll_time == LLONG_MAX)
                return 0;

        /* Use half the running time for the scheduled sleep */
        sleep_time = ctx->hybrid_poll_time / 2;

        kt = ktime_set(0, sleep_time);
        req->flags |= REQ_F_IOPOLL_STATE;

        mode = HRTIMER_MODE_REL;
        hrtimer_setup_sleeper_on_stack(&timer, CLOCK_MONOTONIC, mode);
        hrtimer_set_expires(&timer.timer, kt);
        set_current_state(TASK_INTERRUPTIBLE);
        hrtimer_sleeper_start_expires(&timer, mode);

        if (timer.task)
                io_schedule();

        hrtimer_cancel(&timer.timer);
        __set_current_state(TASK_RUNNING);
        destroy_hrtimer_on_stack(&timer.timer);
        return sleep_time;
}

static int io_uring_hybrid_poll(struct io_kiocb *req,
                                struct io_comp_batch *iob, unsigned int poll_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        u64 runtime, sleep_time;
        int ret;

        sleep_time = io_hybrid_iopoll_delay(ctx, req);
        ret = io_uring_classic_poll(req, iob, poll_flags);
        runtime = ktime_get_ns() - req->iopoll_start - sleep_time;

        /*
         * Use minimum sleep time if we're polling devices with different
         * latencies. We could get more completions from the faster ones.
         */
        if (ctx->hybrid_poll_time > runtime)
                ctx->hybrid_poll_time = runtime;

        return ret;
}

int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
        struct io_wq_work_node *pos, *start, *prev;
        unsigned int poll_flags = 0;
        DEFINE_IO_COMP_BATCH(iob);
        int nr_events = 0;

        /*
         * Only spin for completions if we don't have multiple devices hanging
         * off our complete list.
         */
        if (ctx->poll_multi_queue || force_nonspin)
                poll_flags |= BLK_POLL_ONESHOT;

        wq_list_for_each(pos, start, &ctx->iopoll_list) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
                int ret;

                /*
                 * Move completed and retryable entries to our local lists.
                 * If we find a request that requires polling, break out
                 * and complete those lists first, if we have entries there.
                 */
                if (READ_ONCE(req->iopoll_completed))
                        break;

                if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL)
                        ret = io_uring_hybrid_poll(req, &iob, poll_flags);
                else
                        ret = io_uring_classic_poll(req, &iob, poll_flags);

                if (unlikely(ret < 0))
                        return ret;
                else if (ret)
                        poll_flags |= BLK_POLL_ONESHOT;

                /* iopoll may have completed current req */
                if (!rq_list_empty(&iob.req_list) ||
                    READ_ONCE(req->iopoll_completed))
                        break;
        }

        if (!rq_list_empty(&iob.req_list))
                iob.complete(&iob);
        else if (!pos)
                return 0;

        prev = start;
        wq_list_for_each_resume(pos, prev) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

                /* order with io_complete_rw_iopoll(), e.g. ->result updates */
                if (!smp_load_acquire(&req->iopoll_completed))
                        break;
                nr_events++;
                req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0);
                if (req->opcode != IORING_OP_URING_CMD)
                        io_req_rw_cleanup(req, 0);
        }
        if (unlikely(!nr_events))
                return 0;

        pos = start ? start->next : ctx->iopoll_list.first;
        wq_list_cut(&ctx->iopoll_list, prev, start);

        if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
                return 0;
        ctx->submit_state.compl_reqs.first = pos;
        __io_submit_flush_completions(ctx);
        return nr_events;
}

void io_rw_cache_free(const void *entry)
{
        struct io_async_rw *rw = (struct io_async_rw *) entry;

        io_vec_free(&rw->vec);
        kfree(rw);
}