// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>
#include <linux/indirect_call_wrapper.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "poll.h"
#include "rw.h"

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

static inline bool io_file_supports_nowait(struct io_kiocb *req)
{
	return req->flags & REQ_F_SUPPORT_NOWAIT;
}

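/*
 * For a 32-bit task on a 64-bit kernel, the single iovec used with
 * buffer selection has compat layout; only iov_len is read here, as the
 * upper bound for the buffer that gets picked at issue time.
 */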
#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;

	uiov = u64_to_user_ptr(rw->addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	rw->len = clen;
	return 0;
}
#endif

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_iov_compat_buffer_select_prep(rw);
#endif

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

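/*
 * Sketch (userspace, assuming liburing; illustrative, not part of this
 * file): a vectored read with buffer selection must pass exactly one
 * iovec, whose iov_len caps the provided buffer chosen at issue time:
 *
 *	struct iovec iov = { .iov_base = NULL, .iov_len = 4096 };
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = bgid;
 */
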
static int __io_import_iovec(int ddir, struct io_kiocb *req,
			     struct io_async_rw *io,
			     unsigned int issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct iovec *iov;
	void __user *buf;
	int nr_segs, ret;
	size_t sqe_len;

	buf = u64_to_user_ptr(rw->addr);
	sqe_len = rw->len;

	if (!def->vectored || req->flags & REQ_F_BUFFER_SELECT) {
		if (io_do_buffer_select(req)) {
			buf = io_buffer_select(req, &sqe_len, issue_flags);
			if (!buf)
				return -ENOBUFS;
			rw->addr = (unsigned long) buf;
			rw->len = sqe_len;
		}

		return import_ubuf(ddir, buf, sqe_len, &io->iter);
	}

	if (io->free_iovec) {
		nr_segs = io->free_iov_nr;
		iov = io->free_iovec;
	} else {
		iov = &io->fast_iov;
		nr_segs = 1;
	}
	ret = __import_iovec(ddir, buf, sqe_len, nr_segs, &iov, &io->iter,
				req->ctx->compat);
	if (unlikely(ret < 0))
		return ret;
	if (iov) {
		req->flags |= REQ_F_NEED_CLEANUP;
		io->free_iov_nr = io->iter.nr_segs;
		kfree(io->free_iovec);
		io->free_iovec = iov;
	}
	return 0;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct io_async_rw *io,
				  unsigned int issue_flags)
{
	int ret;

	ret = __io_import_iovec(rw, req, io, issue_flags);
	if (unlikely(ret < 0))
		return ret;

	iov_iter_save_state(&io->iter, &io->iter_state);
	return 0;
}

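/*
 * The async state keeps a heap iovec (free_iovec) around for reuse;
 * freeing and recycling are split so the alloc_cache path can retain
 * the iovec, KASAN-poisoned, for the next request on this ring.
 */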
static void io_rw_iovec_free(struct io_async_rw *rw)
{
	if (rw->free_iovec) {
		kfree(rw->free_iovec);
		rw->free_iov_nr = 0;
		rw->free_iovec = NULL;
	}
}

static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_rw *rw = req->async_data;
	struct iovec *iov;

	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
		io_rw_iovec_free(rw);
		return;
	}
	iov = rw->free_iovec;
	if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
		if (iov)
			kasan_mempool_poison_object(iov);
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	/*
	 * Disable quick recycling for anything that's gone through io-wq.
	 * In theory, this should be fine to clean up. However, some read or
	 * write iter handling touches the iovec AFTER having called into the
	 * handler, e.g. to re-expand or revert. This means we can have:
	 *
	 * task			io-wq
	 *   issue
	 *   punt to io-wq
	 *			issue
	 *			  blkdev_write_iter()
	 *			    ->ki_complete()
	 *			      io_complete_rw()
	 *			        queue tw complete
	 *  run tw
	 *    req_rw_cleanup
	 *			iov_iter_count() <- look at iov_iter again
	 *
	 * which can lead to a UAF. This is only possible for io-wq offload,
	 * as the cleanup can run in parallel. As io-wq is not the fast path,
	 * just leave cleanup to the end.
	 *
	 * This is really a bug in the core code that does this; any issue
	 * path should assume that a successful (or -EIOCBQUEUED) return can
	 * mean that the underlying data can be gone at any time. But that
	 * should be fixed separately, and then this check could be killed.
	 */
	if (!(req->flags & REQ_F_REFCOUNT)) {
		req->flags &= ~REQ_F_NEED_CLEANUP;
		io_rw_recycle(req, issue_flags);
	}
}

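/*
 * Allocate the async read/write state, preferring the per-ring cache.
 * A cached entry may still carry a sized iovec: unpoison it and mark
 * the request for cleanup so the iovec is recycled or freed later.
 */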
static int io_rw_alloc_async(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_rw *rw;

	rw = io_alloc_cache_get(&ctx->rw_cache);
	if (rw) {
		if (rw->free_iovec) {
			kasan_mempool_unpoison_object(rw->free_iovec,
				rw->free_iov_nr * sizeof(struct iovec));
			req->flags |= REQ_F_NEED_CLEANUP;
		}
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = rw;
		goto done;
	}

	if (!io_alloc_async_data(req)) {
		rw = req->async_data;
		rw->free_iovec = NULL;
		rw->free_iov_nr = 0;
done:
		rw->bytes_done = 0;
		return 0;
	}

	return -ENOMEM;
}

static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import)
{
	struct io_async_rw *rw;
	int ret;

	if (io_rw_alloc_async(req))
		return -ENOMEM;

	if (!do_import || io_do_buffer_select(req))
		return 0;

	rw = req->async_data;
	ret = io_import_iovec(ddir, req, rw, 0);
	if (unlikely(ret < 0))
		return ret;

	iov_iter_save_state(&rw->iter, &rw->iter_state);
	return 0;
}

static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      int ddir, bool do_import)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned ioprio;
	int ret;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}
	rw->kiocb.dio_complete = NULL;

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);
	return io_prep_rw_setup(req, ddir, do_import);
}

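/*
 * Sketch (userspace, assuming liburing; illustrative, not part of this
 * file): the prep handlers below back the plain read/write opcodes:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
 *	io_uring_submit(&ring);
 */
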
int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_DEST, true);
}

int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_SOURCE, true);
}

static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		       int ddir)
{
	const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT);
	int ret;

	ret = io_prep_rw(req, sqe, ddir, do_import);
	if (unlikely(ret))
		return ret;
	if (do_import)
		return 0;

	/*
	 * Have to do this validation here; by the time this runs in
	 * io_read(), rw->len might have changed due to buffer selection.
	 */
	return io_iov_buffer_select_prep(req);
}

int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_DEST);
}

int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_SOURCE);
}

static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			    int ddir)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_rw *io;
	u16 index;
	int ret;

	ret = io_prep_rw(req, sqe, ddir, false);
	if (unlikely(ret))
		return ret;

	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
		return -EFAULT;
	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
	req->imu = ctx->user_bufs[index];
	io_req_set_rsrc_node(req, ctx, 0);

	io = req->async_data;
	ret = io_import_fixed(ddir, &io->iter, req->imu, rw->addr, rw->len);
	iov_iter_save_state(&io->iter, &io->iter_state);
	return ret;
}

int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw_fixed(req, sqe, ITER_DEST);
}

int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw_fixed(req, sqe, ITER_SOURCE);
}

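/*
 * Sketch (userspace, assuming liburing; illustrative, not part of this
 * file): fixed reads/writes target buffers registered up front, with
 * sqe->buf_index selecting one of them:
 *
 *	io_uring_register_buffers(&ring, iovecs, nr_iovecs);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read_fixed(sqe, fd, iovecs[0].iov_base,
 *				 iovecs[0].iov_len, 0, 0);
 */
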
/*
 * Multishot read is prepared just like a normal read/write request; the
 * only difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	int ret;

	/* must be used with provided buffers */
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	ret = io_prep_rw(req, sqe, ITER_DEST, false);
	if (unlikely(ret))
		return ret;

	if (rw->addr || rw->len)
		return -EINVAL;

	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

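/*
 * Sketch (userspace, assuming liburing >= 2.6; illustrative, not part
 * of this file): multishot reads need a pollable file plus provided
 * buffers, and keep posting CQEs flagged IORING_CQE_F_MORE while armed:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read_multishot(sqe, sockfd, 0, 0, bgid);
 */
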
void io_readv_writev_cleanup(struct io_kiocb *req)
{
	io_rw_iovec_free(req->async_data);
}

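/*
 * Resolve the position to do IO at. An SQE offset of -1 means "use the
 * current file position", like read(2)/write(2): latch f_pos into
 * ki_pos and flag the request so f_pos is written back on completion.
 * Stream-like files have no position at all.
 */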
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}

#ifdef CONFIG_BLOCK
static void io_resubmit_prep(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	iov_iter_restore(&io->iter, &io->iter_state);
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	/*
	 * Play it safe and assume it's not safe to re-import and reissue if
	 * we're not in the original thread group (or not in task context).
	 */
	if (!same_thread_group(req->task, current) || !in_task())
		return false;
	return true;
}
#else
static void io_resubmit_prep(struct io_kiocb *req)
{
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif

static void io_req_end_write(struct io_kiocb *req)
{
	if (req->flags & REQ_F_ISREG) {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		kiocb_end_write(&rw->kiocb);
	}
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		io_req_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}

static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			/*
			 * Reissue will start accounting again, finish the
			 * current cycle.
			 */
			io_req_io_end(req);
			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
			return true;
		}
		req_set_fail(req);
		req->cqe.res = res;
	}
	return false;
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}

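/*
 * Task-work completion: finish any deferred dio completion, do the
 * fsnotify and write accounting, drop a selected buffer, and post the
 * CQE from task context.
 */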
void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
		long res = kiocb->dio_complete(rw->kiocb.private);

		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}

	io_req_io_end(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0);

	io_req_rw_cleanup(req, 0);
	io_req_task_complete(req, ts);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
		if (__io_complete_rw_common(req, res))
			return;
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		io_req_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
			return;
		}
		req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	/* IO was queued async, completion will happen later */
	if (ret == -EIOCBQUEUED)
		return;

	/* transform internal restart error codes */
	if (unlikely(ret < 0)) {
		switch (ret) {
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTARTNOHAND:
		case -ERESTART_RESTARTBLOCK:
			/*
			 * We can't just restart the syscall, since previously
			 * submitted sqes may already be in progress. Just fail
			 * this IO with EINTR.
			 */
			ret = -EINTR;
			break;
		}
	}

	INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
			io_complete_rw, kiocb, ret);
}

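/*
 * Common completion for inline issue: sync the file position back if
 * needed, complete inline when the fast path allows it, or hand off to
 * io_rw_done(). A REQ_F_REISSUE request is rewound and retried with
 * -EAGAIN.
 */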
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		      unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
		if (!__io_complete_rw_common(req, ret)) {
			/*
			 * Safe to call io_req_io_end() from here as we're
			 * inline from the submission path.
			 */
			io_req_io_end(req);
			io_req_set_res(req, final_ret,
				       io_put_kbuf(req, ret, issue_flags));
			io_req_rw_cleanup(req, issue_flags);
			return IOU_OK;
		}
	} else {
		io_rw_done(&rw->kiocb, ret);
	}

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		io_resubmit_prep(req);
		return -EAGAIN;
	}
	return IOU_ISSUE_SKIP_COMPLETE;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		void __user *addr;
		size_t len;
		ssize_t nr;

		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
		} else {
			addr = u64_to_user_ptr(rw->addr);
			len = rw->len;
		}

		if (ddir == READ)
			nr = file->f_op->read(file, addr, len, ppos);
		else
			nr = file->f_op->write(file, addr, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != len)
			break;
	}

	return ret;
}

/*
 * This is our waitqueue callback handler, registered through
 * __folio_lock_async() when we initially tried to do the IO with the
 * iocb and armed our waitqueue. This gets called when the page is
 * unlocked, and we generally expect that to happen when the page IO is
 * completed and the page is now uptodate. This will queue a task_work
 * based retry of the operation, attempting to copy the data again. If
 * the latter fails because the page was NOT uptodate, then we will do a
 * thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * Just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks.
	 */
	if (io_file_can_poll(req) ||
	    !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return file->f_op->read_iter(&rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!(file->f_mode & mode)))
		return -EBADF;

	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
	if (unlikely(ret))
		return ret;
	kiocb->ki_flags |= IOCB_ALLOC_CACHE;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	return 0;
}

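/*
 * Core read path: import the buffer (or pick a provided one), attempt a
 * nonblocking issue first, and for short buffered reads retry via the
 * page waitqueue where possible rather than punting to io-wq.
 */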
static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret;
	loff_t *ppos;

	if (io_do_buffer_select(req)) {
		ret = io_import_iovec(ITER_DEST, req, io, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	}
	ret = io_rw_init_file(req, FMODE_READ, READ);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req)))
			return -EAGAIN;
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	ret = io_iter_do_read(rw, &io->iter);

	/*
	 * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
	 * issue, even though they should be returning -EAGAIN. To be safe,
	 * retry from blocking context for either.
	 */
	if (ret == -EOPNOTSUPP && force_nonblock)
		ret = -EAGAIN;

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/* If we can poll, just do that. */
		if (io_file_can_poll(req))
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&io->iter, &io->iter_state);

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&io->iter, ret);
		if (!iov_iter_count(&io->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&io->iter, &io->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&io->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &io->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&io->iter, &io->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than to delegate to kfree */
	return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = __io_read(req, issue_flags);
	if (ret >= 0)
		return kiocb_done(req, ret, issue_flags);

	return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned int cflags = 0;
	int ret;

	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!io_file_can_poll(req))
		return -EBADFD;

	ret = __io_read(req, issue_flags);

	/*
	 * If the file doesn't support proper NOWAIT, then disable multishot
	 * and stay in single shot mode.
	 */
	if (!io_file_supports_nowait(req))
		req->flags &= ~REQ_F_APOLL_MULTISHOT;

	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it.
	 */
	if (ret == -EAGAIN) {
		/*
		 * Reset rw->len to 0 again to avoid clamping future mshot
		 * reads, in case the buffer size varies.
		 */
		if (io_kbuf_recycle(req, issue_flags))
			rw->len = 0;
		if (issue_flags & IO_URING_F_MULTISHOT)
			return IOU_ISSUE_SKIP_COMPLETE;
		return -EAGAIN;
	}

	/*
	 * Any successful return value will keep the multishot read armed.
	 */
	if (ret > 0 && req->flags & REQ_F_APOLL_MULTISHOT) {
		/*
		 * Put our buffer and post a CQE. If we fail to post a CQE, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, ret, issue_flags);
		rw->len = 0; /* similarly to above, reset len to 0 */

		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				/*
				 * Force retry, as we might have more data to
				 * be read and otherwise it won't get retried
				 * until (if ever) another poll is triggered.
				 */
				io_poll_multishot_retry(req);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return -EAGAIN;
		}
	}

	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	io_req_rw_cleanup(req, issue_flags);
	if (issue_flags & IO_URING_F_MULTISHOT)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

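/*
 * Core write path. Unlike reads there is no waitqueue-based retry;
 * short or -EAGAIN buffered writes are finished from io-wq, with
 * bytes_done tracking what has already been written.
 */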
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret, ret2;
	loff_t *ppos;

	ret = io_rw_init_file(req, FMODE_WRITE, WRITE);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req)))
			goto ret_eagain;

		/* Check if we can support NOWAIT. */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
		    !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) &&
		    (req->flags & REQ_F_ISREG))
			goto ret_eagain;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	if (req->flags & REQ_F_ISREG)
		kiocb_start_write(kiocb);
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &io->iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto ret_eagain;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						   req->cqe.res, ret2);

			/*
			 * This is a partial write. The file pos has already
			 * been updated, set up the async struct to complete
			 * the request in the worker. Also update bytes_done
			 * to account for the bytes already written.
			 */
			iov_iter_save_state(&io->iter, &io->iter_state);
			io->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return -EAGAIN;
		}
done:
		return kiocb_done(req, ret2, issue_flags);
	} else {
ret_eagain:
		iov_iter_restore(&io->iter, &io->iter_state);
		if (kiocb->ki_flags & IOCB_WRITE)
			io_req_end_write(req);
		return -EAGAIN;
	}
}

void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}

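/*
 * Reap completions for IORING_SETUP_IOPOLL rings: poll each inflight
 * request's driver, then flush everything that has completed, in order.
 */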
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		struct file *file = req->file;
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (req->opcode == IORING_OP_URING_CMD) {
			struct io_uring_cmd *ioucmd;

			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
							   poll_flags);
		} else {
			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
		}
		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0);
		if (req->opcode != IORING_OP_URING_CMD)
			io_req_rw_cleanup(req, 0);
	}
	if (unlikely(!nr_events))
		return 0;

	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);

	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
		return 0;
	ctx->submit_state.compl_reqs.first = pos;
	__io_submit_flush_completions(ctx);
	return nr_events;
}

void io_rw_cache_free(const void *entry)
{
	struct io_async_rw *rw = (struct io_async_rw *) entry;

	if (rw->free_iovec) {
		kasan_mempool_unpoison_object(rw->free_iovec,
				rw->free_iov_nr * sizeof(struct iovec));
		io_rw_iovec_free(rw);
	}
	kfree(rw);
}