Commit | Line | Data |
---|---|---|
f3b44f92 JA |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/kernel.h> | |
3 | #include <linux/errno.h> | |
4 | #include <linux/fs.h> | |
5 | #include <linux/file.h> | |
6 | #include <linux/blk-mq.h> | |
7 | #include <linux/mm.h> | |
8 | #include <linux/slab.h> | |
9 | #include <linux/fsnotify.h> | |
10 | #include <linux/poll.h> | |
11 | #include <linux/nospec.h> | |
12 | #include <linux/compat.h> | |
13 | #include <linux/io_uring.h> | |
14 | ||
15 | #include <uapi/linux/io_uring.h> | |
16 | ||
f3b44f92 JA |
17 | #include "io_uring.h" |
18 | #include "opdef.h" | |
19 | #include "kbuf.h" | |
20 | #include "rsrc.h" | |
21 | #include "rw.h" | |
22 | ||
23 | struct io_rw { | |
24 | /* NOTE: kiocb has the file as the first member, so don't do it here */ | |
25 | struct kiocb kiocb; | |
26 | u64 addr; | |
27 | u32 len; | |
28 | rwf_t flags; | |
29 | }; | |
30 | ||
31 | static inline bool io_file_supports_nowait(struct io_kiocb *req) | |
32 | { | |
33 | return req->flags & REQ_F_SUPPORT_NOWAIT; | |
34 | } | |
35 | ||
4ab9d465 DY |
36 | #ifdef CONFIG_COMPAT |
37 | static int io_iov_compat_buffer_select_prep(struct io_rw *rw) | |
38 | { | |
39 | struct compat_iovec __user *uiov; | |
40 | compat_ssize_t clen; | |
41 | ||
42 | uiov = u64_to_user_ptr(rw->addr); | |
43 | if (!access_ok(uiov, sizeof(*uiov))) | |
44 | return -EFAULT; | |
45 | if (__get_user(clen, &uiov->iov_len)) | |
46 | return -EFAULT; | |
47 | if (clen < 0) | |
48 | return -EINVAL; | |
49 | ||
50 | rw->len = clen; | |
51 | return 0; | |
52 | } | |
53 | #endif | |
54 | ||
55 | static int io_iov_buffer_select_prep(struct io_kiocb *req) | |
56 | { | |
57 | struct iovec __user *uiov; | |
58 | struct iovec iov; | |
59 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); | |
60 | ||
61 | if (rw->len != 1) | |
62 | return -EINVAL; | |
63 | ||
64 | #ifdef CONFIG_COMPAT | |
65 | if (req->ctx->compat) | |
66 | return io_iov_compat_buffer_select_prep(rw); | |
67 | #endif | |
68 | ||
69 | uiov = u64_to_user_ptr(rw->addr); | |
70 | if (copy_from_user(&iov, uiov, sizeof(*uiov))) | |
71 | return -EFAULT; | |
72 | rw->len = iov.iov_len; | |
73 | return 0; | |
74 | } | |
75 | ||
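io_iov_buffer_select_prep() above only accepts a single iovec when IORING_OP_READV is combined with buffer selection: the iovec's base is ignored and its length merely caps the transfer, since the kernel picks the actual buffer from the provided-buffer group at issue time. A minimal userspace sketch of that pairing, assuming liburing and omitting error handling (the group id and buffer sizes are illustrative, not from this file):

```c
#include <liburing.h>

#define BGID     7		/* arbitrary provided-buffer group id */
#define BUF_SIZE 4096

/* 'pool' must point at 4 * BUF_SIZE bytes owned by the caller */
static int readv_with_buffer_select(struct io_uring *ring, int fd, char *pool)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	/* exactly one iovec; more than one fails prep with -EINVAL */
	struct iovec iov = { .iov_base = NULL, .iov_len = BUF_SIZE };
	int bid = -1;

	/* hand four BUF_SIZE buffers to the kernel under group BGID */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, pool, BUF_SIZE, 4, BGID, 0);
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	io_uring_cqe_seen(ring, cqe);

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;	/* let the kernel pick the buffer */
	sqe->buf_group = BGID;
	io_uring_submit(ring);

	io_uring_wait_cqe(ring, &cqe);
	if (cqe->res >= 0 && (cqe->flags & IORING_CQE_F_BUFFER))
		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;	/* chosen buffer id */
	io_uring_cqe_seen(ring, cqe);
	return bid;
}
```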
f3b44f92 JA |
76 | int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
77 | { | |
f2ccb5ae | 78 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA |
79 | unsigned ioprio; |
80 | int ret; | |
81 | ||
82 | rw->kiocb.ki_pos = READ_ONCE(sqe->off); | |
83 | /* used for fixed read/write too - just read unconditionally */ | |
84 | req->buf_index = READ_ONCE(sqe->buf_index); | |
85 | ||
86 | if (req->opcode == IORING_OP_READ_FIXED || | |
87 | req->opcode == IORING_OP_WRITE_FIXED) { | |
88 | struct io_ring_ctx *ctx = req->ctx; | |
89 | u16 index; | |
90 | ||
91 | if (unlikely(req->buf_index >= ctx->nr_user_bufs)) | |
92 | return -EFAULT; | |
93 | index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); | |
94 | req->imu = ctx->user_bufs[index]; | |
95 | io_req_set_rsrc_node(req, ctx, 0); | |
96 | } | |
97 | ||
98 | ioprio = READ_ONCE(sqe->ioprio); | |
99 | if (ioprio) { | |
100 | ret = ioprio_check_cap(ioprio); | |
101 | if (ret) | |
102 | return ret; | |
103 | ||
104 | rw->kiocb.ki_ioprio = ioprio; | |
105 | } else { | |
106 | rw->kiocb.ki_ioprio = get_current_ioprio(); | |
107 | } | |
108 | ||
109 | rw->addr = READ_ONCE(sqe->addr); | |
110 | rw->len = READ_ONCE(sqe->len); | |
111 | rw->flags = READ_ONCE(sqe->rw_flags); | |
4ab9d465 DY |
112 | |
113 | /* Have to do this validation here, as rw->len might have changed in |
114 |  * io_read() due to buffer selection |
115 | */ | |
116 | if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) { | |
117 | ret = io_iov_buffer_select_prep(req); | |
118 | if (ret) | |
119 | return ret; | |
120 | } | |
121 | ||
f3b44f92 JA |
122 | return 0; |
123 | } | |
124 | ||
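The IORING_OP_READ_FIXED/WRITE_FIXED branch in io_prep_rw() resolves req->buf_index against the table of buffers the application registered earlier, failing with -EFAULT if the index is out of range. A hedged liburing sketch of that flow (single registered buffer, error handling omitted):

```c
#include <liburing.h>

static int read_fixed_example(struct io_uring *ring, int fd)
{
	static char buf[4096];
	struct iovec reg = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	/* registers one buffer; valid buf_index values are then 0..nr-1 */
	io_uring_register_buffers(ring, &reg, 1);

	sqe = io_uring_get_sqe(ring);
	/* last argument is buf_index; out-of-range values fail with -EFAULT */
	io_uring_prep_read_fixed(sqe, fd, buf, sizeof(buf), 0, 0);
	io_uring_submit(ring);

	io_uring_wait_cqe(ring, &cqe);
	ret = cqe->res;		/* bytes read, or negative errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}
```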
125 | void io_readv_writev_cleanup(struct io_kiocb *req) | |
126 | { | |
127 | struct io_async_rw *io = req->async_data; | |
128 | ||
129 | kfree(io->free_iovec); | |
130 | } | |
131 | ||
132 | static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) | |
133 | { | |
134 | switch (ret) { | |
135 | case -EIOCBQUEUED: | |
136 | break; | |
137 | case -ERESTARTSYS: | |
138 | case -ERESTARTNOINTR: | |
139 | case -ERESTARTNOHAND: | |
140 | case -ERESTART_RESTARTBLOCK: | |
141 | /* | |
142 | * We can't just restart the syscall, since previously | |
143 | * submitted sqes may already be in progress. Just fail this | |
144 | * IO with EINTR. | |
145 | */ | |
146 | ret = -EINTR; | |
147 | fallthrough; | |
148 | default: | |
149 | kiocb->ki_complete(kiocb, ret); | |
150 | } | |
151 | } | |
152 | ||
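io_rw_done() deliberately converts the ERESTART* family into -EINTR, because already-submitted SQEs cannot be transparently restarted the way a plain syscall can. From userspace this means a signal-interrupted request simply completes with cqe->res == -EINTR and has to be queued again by the application; a small illustrative helper, assuming liburing:

```c
#include <liburing.h>

/* Reap one completion; -EINTR means the request was cut short by a signal
 * and the caller should re-prep and resubmit it - the kernel will not. */
static int reap_one(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	int res;

	io_uring_wait_cqe(ring, &cqe);
	res = cqe->res;			/* byte count, or negative errno */
	io_uring_cqe_seen(ring, cqe);
	return res;
}
```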
153 | static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) | |
154 | { | |
f2ccb5ae | 155 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA |
156 | |
157 | if (rw->kiocb.ki_pos != -1) | |
158 | return &rw->kiocb.ki_pos; | |
159 | ||
160 | if (!(req->file->f_mode & FMODE_STREAM)) { | |
161 | req->flags |= REQ_F_CUR_POS; | |
162 | rw->kiocb.ki_pos = req->file->f_pos; | |
163 | return &rw->kiocb.ki_pos; | |
164 | } | |
165 | ||
166 | rw->kiocb.ki_pos = 0; | |
167 | return NULL; | |
168 | } | |
169 | ||
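io_kiocb_update_pos() is what gives an SQE offset of -1 its read(2)/write(2)-like semantics on non-stream files: the request uses and then advances the file's f_pos (REQ_F_CUR_POS). An illustrative liburing snippet; the IOSQE_IO_LINK flag is only there to force the two reads to run in order:

```c
#include <liburing.h>

static void two_sequential_reads(struct io_uring *ring, int fd,
				 char *a, char *b, unsigned len)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, a, len, -1);	/* reads at current f_pos */
	sqe->flags |= IOSQE_IO_LINK;			/* run the next read only after this one */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, b, len, -1);	/* continues where the first left off */

	io_uring_submit(ring);
}
```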
170 | static void io_req_task_queue_reissue(struct io_kiocb *req) | |
171 | { | |
172 | req->io_task_work.func = io_queue_iowq; | |
173 | io_req_task_work_add(req); | |
174 | } | |
175 | ||
176 | #ifdef CONFIG_BLOCK | |
177 | static bool io_resubmit_prep(struct io_kiocb *req) | |
178 | { | |
179 | struct io_async_rw *io = req->async_data; | |
180 | ||
181 | if (!req_has_async_data(req)) | |
182 | return !io_req_prep_async(req); | |
183 | iov_iter_restore(&io->s.iter, &io->s.iter_state); | |
184 | return true; | |
185 | } | |
186 | ||
187 | static bool io_rw_should_reissue(struct io_kiocb *req) | |
188 | { | |
189 | umode_t mode = file_inode(req->file)->i_mode; | |
190 | struct io_ring_ctx *ctx = req->ctx; | |
191 | ||
192 | if (!S_ISBLK(mode) && !S_ISREG(mode)) | |
193 | return false; | |
194 | if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && | |
195 | !(ctx->flags & IORING_SETUP_IOPOLL))) | |
196 | return false; | |
197 | /* | |
198 | * If ref is dying, we might be running poll reap from the exit work. | |
199 | * Don't attempt to reissue from that path, just let it fail with | |
200 | * -EAGAIN. | |
201 | */ | |
202 | if (percpu_ref_is_dying(&ctx->refs)) | |
203 | return false; | |
204 | /* | |
205 | * Play it safe and assume not safe to re-import and reissue if we're | |
206 | * not in the original thread group (or not in task context). | |
207 | */ | |
208 | if (!same_thread_group(req->task, current) || !in_task()) | |
209 | return false; | |
210 | return true; | |
211 | } | |
212 | #else | |
213 | static bool io_resubmit_prep(struct io_kiocb *req) | |
214 | { | |
215 | return false; | |
216 | } | |
217 | static bool io_rw_should_reissue(struct io_kiocb *req) | |
218 | { | |
219 | return false; | |
220 | } | |
221 | #endif | |
222 | ||
223 | static void kiocb_end_write(struct io_kiocb *req) | |
224 | { | |
225 | /* | |
226 | * Tell lockdep we inherited freeze protection from submission | |
227 | * thread. | |
228 | */ | |
229 | if (req->flags & REQ_F_ISREG) { | |
230 | struct super_block *sb = file_inode(req->file)->i_sb; | |
231 | ||
232 | __sb_writers_acquired(sb, SB_FREEZE_WRITE); | |
233 | sb_end_write(sb); | |
234 | } | |
235 | } | |
236 | ||
237 | static bool __io_complete_rw_common(struct io_kiocb *req, long res) | |
238 | { | |
f2ccb5ae | 239 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA |
240 | |
241 | if (rw->kiocb.ki_flags & IOCB_WRITE) { | |
242 | kiocb_end_write(req); | |
243 | fsnotify_modify(req->file); | |
244 | } else { | |
245 | fsnotify_access(req->file); | |
246 | } | |
247 | if (unlikely(res != req->cqe.res)) { | |
248 | if ((res == -EAGAIN || res == -EOPNOTSUPP) && | |
249 | io_rw_should_reissue(req)) { | |
250 | req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO; | |
251 | return true; | |
252 | } | |
253 | req_set_fail(req); | |
254 | req->cqe.res = res; | |
255 | } | |
256 | return false; | |
257 | } | |
258 | ||
62bb0647 | 259 | static inline int io_fixup_rw_res(struct io_kiocb *req, long res) |
4d9cb92c PB |
260 | { |
261 | struct io_async_rw *io = req->async_data; | |
262 | ||
263 | /* add previously done IO, if any */ | |
264 | if (req_has_async_data(req) && io->bytes_done > 0) { | |
265 | if (res < 0) | |
266 | res = io->bytes_done; | |
267 | else | |
268 | res += io->bytes_done; | |
269 | } | |
270 | return res; | |
271 | } | |
272 | ||
f3b44f92 JA |
273 | static void io_complete_rw(struct kiocb *kiocb, long res) |
274 | { | |
275 | struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb); | |
276 | struct io_kiocb *req = cmd_to_io_kiocb(rw); | |
277 | ||
278 | if (__io_complete_rw_common(req, res)) | |
279 | return; | |
4d9cb92c | 280 | io_req_set_res(req, io_fixup_rw_res(req, res), 0); |
f3b44f92 | 281 | req->io_task_work.func = io_req_task_complete; |
ed5ccb3b | 282 | io_req_task_work_add(req); |
f3b44f92 JA |
283 | } |
284 | ||
285 | static void io_complete_rw_iopoll(struct kiocb *kiocb, long res) | |
286 | { | |
287 | struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb); | |
288 | struct io_kiocb *req = cmd_to_io_kiocb(rw); | |
289 | ||
290 | if (kiocb->ki_flags & IOCB_WRITE) | |
291 | kiocb_end_write(req); | |
292 | if (unlikely(res != req->cqe.res)) { | |
293 | if (res == -EAGAIN && io_rw_should_reissue(req)) { | |
294 | req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO; | |
295 | return; | |
296 | } | |
297 | req->cqe.res = res; | |
298 | } | |
299 | ||
300 | /* order with io_iopoll_complete() checking ->iopoll_completed */ | |
301 | smp_store_release(&req->iopoll_completed, 1); | |
302 | } | |
303 | ||
df9830d8 | 304 | static int kiocb_done(struct io_kiocb *req, ssize_t ret, |
f3b44f92 JA |
305 | unsigned int issue_flags) |
306 | { | |
f2ccb5ae | 307 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
4d9cb92c | 308 | unsigned final_ret = io_fixup_rw_res(req, ret); |
f3b44f92 JA |
309 | |
310 | if (req->flags & REQ_F_CUR_POS) | |
311 | req->file->f_pos = rw->kiocb.ki_pos; | |
df9830d8 PB |
312 | if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) { |
313 | if (!__io_complete_rw_common(req, ret)) { | |
4d9cb92c | 314 | io_req_set_res(req, final_ret, |
df9830d8 PB |
315 | io_put_kbuf(req, issue_flags)); |
316 | return IOU_OK; | |
317 | } | |
318 | } else { | |
f3b44f92 | 319 | io_rw_done(&rw->kiocb, ret); |
df9830d8 | 320 | } |
f3b44f92 JA |
321 | |
322 | if (req->flags & REQ_F_REISSUE) { | |
323 | req->flags &= ~REQ_F_REISSUE; | |
324 | if (io_resubmit_prep(req)) | |
325 | io_req_task_queue_reissue(req); | |
326 | else | |
4d9cb92c | 327 | io_req_task_queue_fail(req, final_ret); |
f3b44f92 | 328 | } |
df9830d8 | 329 | return IOU_ISSUE_SKIP_COMPLETE; |
f3b44f92 JA |
330 | } |
331 | ||
f3b44f92 JA |
332 | static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req, |
333 | struct io_rw_state *s, | |
334 | unsigned int issue_flags) | |
335 | { | |
f2ccb5ae | 336 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA |
337 | struct iov_iter *iter = &s->iter; |
338 | u8 opcode = req->opcode; | |
339 | struct iovec *iovec; | |
340 | void __user *buf; | |
341 | size_t sqe_len; | |
342 | ssize_t ret; | |
343 | ||
344 | if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { | |
f337a84d | 345 | ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len); |
f3b44f92 JA |
346 | if (ret) |
347 | return ERR_PTR(ret); | |
348 | return NULL; | |
349 | } | |
350 | ||
351 | buf = u64_to_user_ptr(rw->addr); | |
352 | sqe_len = rw->len; | |
353 | ||
4ab9d465 DY |
354 | if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE || |
355 | (req->flags & REQ_F_BUFFER_SELECT)) { | |
f3b44f92 JA |
356 | if (io_do_buffer_select(req)) { |
357 | buf = io_buffer_select(req, &sqe_len, issue_flags); | |
358 | if (!buf) | |
359 | return ERR_PTR(-ENOBUFS); | |
360 | rw->addr = (unsigned long) buf; | |
361 | rw->len = sqe_len; | |
362 | } | |
363 | ||
364 | ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter); | |
365 | if (ret) | |
366 | return ERR_PTR(ret); | |
367 | return NULL; | |
368 | } | |
369 | ||
370 | iovec = s->fast_iov; | |
f3b44f92 JA |
371 | ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter, |
372 | req->ctx->compat); | |
373 | if (unlikely(ret < 0)) | |
374 | return ERR_PTR(ret); | |
375 | return iovec; | |
376 | } | |
377 | ||
378 | static inline int io_import_iovec(int rw, struct io_kiocb *req, | |
379 | struct iovec **iovec, struct io_rw_state *s, | |
380 | unsigned int issue_flags) | |
381 | { | |
382 | *iovec = __io_import_iovec(rw, req, s, issue_flags); | |
383 | if (unlikely(IS_ERR(*iovec))) | |
384 | return PTR_ERR(*iovec); | |
385 | ||
386 | iov_iter_save_state(&s->iter, &s->iter_state); | |
387 | return 0; | |
388 | } | |
389 | ||
390 | static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb) | |
391 | { | |
392 | return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos; | |
393 | } | |
394 | ||
395 | /* | |
396 | * For files that don't have ->read_iter() and ->write_iter(), handle them | |
397 | * by looping over ->read() or ->write() manually. | |
398 | */ | |
399 | static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter) | |
400 | { | |
401 | struct kiocb *kiocb = &rw->kiocb; | |
402 | struct file *file = kiocb->ki_filp; | |
403 | ssize_t ret = 0; | |
404 | loff_t *ppos; | |
405 | ||
406 | /* | |
407 | * Don't support polled IO through this interface, and we can't | |
408 | * support non-blocking either. For the latter, this just causes | |
409 | * the kiocb to be handled from an async context. | |
410 | */ | |
411 | if (kiocb->ki_flags & IOCB_HIPRI) | |
412 | return -EOPNOTSUPP; | |
413 | if ((kiocb->ki_flags & IOCB_NOWAIT) && | |
414 | !(kiocb->ki_filp->f_flags & O_NONBLOCK)) | |
415 | return -EAGAIN; | |
416 | ||
417 | ppos = io_kiocb_ppos(kiocb); | |
418 | ||
419 | while (iov_iter_count(iter)) { | |
420 | struct iovec iovec; | |
421 | ssize_t nr; | |
422 | ||
423 | if (!iov_iter_is_bvec(iter)) { | |
424 | iovec = iov_iter_iovec(iter); | |
425 | } else { | |
426 | iovec.iov_base = u64_to_user_ptr(rw->addr); | |
427 | iovec.iov_len = rw->len; | |
428 | } | |
429 | ||
430 | if (ddir == READ) { | |
431 | nr = file->f_op->read(file, iovec.iov_base, | |
432 | iovec.iov_len, ppos); | |
433 | } else { | |
434 | nr = file->f_op->write(file, iovec.iov_base, | |
435 | iovec.iov_len, ppos); | |
436 | } | |
437 | ||
438 | if (nr < 0) { | |
439 | if (!ret) | |
440 | ret = nr; | |
441 | break; | |
442 | } | |
443 | ret += nr; | |
444 | if (!iov_iter_is_bvec(iter)) { | |
445 | iov_iter_advance(iter, nr); | |
446 | } else { | |
447 | rw->addr += nr; | |
448 | rw->len -= nr; | |
449 | if (!rw->len) | |
450 | break; | |
451 | } | |
452 | if (nr != iovec.iov_len) | |
453 | break; | |
454 | } | |
455 | ||
456 | return ret; | |
457 | } | |
458 | ||
459 | static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, | |
460 | const struct iovec *fast_iov, struct iov_iter *iter) | |
461 | { | |
462 | struct io_async_rw *io = req->async_data; | |
463 | ||
464 | memcpy(&io->s.iter, iter, sizeof(*iter)); | |
465 | io->free_iovec = iovec; | |
466 | io->bytes_done = 0; | |
467 | /* can only be fixed buffers, no need to do anything */ | |
468 | if (iov_iter_is_bvec(iter)) | |
469 | return; | |
470 | if (!iovec) { | |
471 | unsigned iov_off = 0; | |
472 | ||
473 | io->s.iter.iov = io->s.fast_iov; | |
474 | if (iter->iov != fast_iov) { | |
475 | iov_off = iter->iov - fast_iov; | |
476 | io->s.iter.iov += iov_off; | |
477 | } | |
478 | if (io->s.fast_iov != fast_iov) | |
479 | memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off, | |
480 | sizeof(struct iovec) * iter->nr_segs); | |
481 | } else { | |
482 | req->flags |= REQ_F_NEED_CLEANUP; | |
483 | } | |
484 | } | |
485 | ||
486 | static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, | |
487 | struct io_rw_state *s, bool force) | |
488 | { | |
489 | if (!force && !io_op_defs[req->opcode].prep_async) | |
490 | return 0; | |
491 | if (!req_has_async_data(req)) { | |
492 | struct io_async_rw *iorw; | |
493 | ||
494 | if (io_alloc_async_data(req)) { | |
495 | kfree(iovec); | |
496 | return -ENOMEM; | |
497 | } | |
498 | ||
499 | io_req_map_rw(req, iovec, s->fast_iov, &s->iter); | |
500 | iorw = req->async_data; | |
501 | /* we've copied and mapped the iter, ensure state is saved */ | |
502 | iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state); | |
503 | } | |
504 | return 0; | |
505 | } | |
506 | ||
507 | static inline int io_rw_prep_async(struct io_kiocb *req, int rw) | |
508 | { | |
509 | struct io_async_rw *iorw = req->async_data; | |
510 | struct iovec *iov; | |
511 | int ret; | |
512 | ||
513 | /* submission path, ->uring_lock should already be taken */ | |
514 | ret = io_import_iovec(rw, req, &iov, &iorw->s, 0); | |
515 | if (unlikely(ret < 0)) | |
516 | return ret; | |
517 | ||
518 | iorw->bytes_done = 0; | |
519 | iorw->free_iovec = iov; | |
520 | if (iov) | |
521 | req->flags |= REQ_F_NEED_CLEANUP; | |
522 | return 0; | |
523 | } | |
524 | ||
525 | int io_readv_prep_async(struct io_kiocb *req) | |
526 | { | |
527 | return io_rw_prep_async(req, READ); | |
528 | } | |
529 | ||
530 | int io_writev_prep_async(struct io_kiocb *req) | |
531 | { | |
532 | return io_rw_prep_async(req, WRITE); | |
533 | } | |
534 | ||
535 | /* | |
536 | * This is our waitqueue callback handler, registered through __folio_lock_async() | |
537 | * when we initially tried to do the IO with our waitqueue armed on the iocb. | |
538 | * This gets called when the page is unlocked, and we generally expect that to | |
539 | * happen when the page IO is completed and the page is now uptodate. This will | |
540 | * queue a task_work based retry of the operation, attempting to copy the data | |
541 | * again. If the latter fails because the page was NOT uptodate, then we will | |
542 | * do a thread based blocking retry of the operation. That's the unexpected | |
543 | * slow path. | |
544 | */ | |
545 | static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, | |
546 | int sync, void *arg) | |
547 | { | |
548 | struct wait_page_queue *wpq; | |
549 | struct io_kiocb *req = wait->private; | |
f2ccb5ae | 550 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA |
551 | struct wait_page_key *key = arg; |
552 | ||
553 | wpq = container_of(wait, struct wait_page_queue, wait); | |
554 | ||
555 | if (!wake_page_match(wpq, key)) | |
556 | return 0; | |
557 | ||
558 | rw->kiocb.ki_flags &= ~IOCB_WAITQ; | |
559 | list_del_init(&wait->entry); | |
560 | io_req_task_queue(req); | |
561 | return 1; | |
562 | } | |
563 | ||
564 | /* | |
565 | * This controls whether a given IO request should be armed for async page | |
566 | * based retry. If we return false here, the request is handed to the async | |
567 | * worker threads for retry. If we're doing buffered reads on a regular file, | |
568 | * we prepare a private wait_page_queue entry and retry the operation. This | |
569 | * will either succeed because the page is now uptodate and unlocked, or it | |
570 | * will register a callback when the page is unlocked at IO completion. Through | |
571 | * that callback, io_uring uses task_work to setup a retry of the operation. | |
572 | * That retry will attempt the buffered read again. The retry will generally | |
573 | * succeed, or in rare cases where it fails, we then fall back to using the | |
574 | * async worker threads for a blocking retry. | |
575 | */ | |
576 | static bool io_rw_should_retry(struct io_kiocb *req) | |
577 | { | |
578 | struct io_async_rw *io = req->async_data; | |
579 | struct wait_page_queue *wait = &io->wpq; | |
f2ccb5ae | 580 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA |
581 | struct kiocb *kiocb = &rw->kiocb; |
582 | ||
583 | /* never retry for NOWAIT, we just complete with -EAGAIN */ | |
584 | if (req->flags & REQ_F_NOWAIT) | |
585 | return false; | |
586 | ||
587 | /* Only for buffered IO */ | |
588 | if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI)) | |
589 | return false; | |
590 | ||
591 | /* | |
592 | * just use poll if we can, and don't attempt if the fs doesn't | |
593 | * support callback based unlocks | |
594 | */ | |
595 | if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC)) | |
596 | return false; | |
597 | ||
598 | wait->wait.func = io_async_buf_func; | |
599 | wait->wait.private = req; | |
600 | wait->wait.flags = 0; | |
601 | INIT_LIST_HEAD(&wait->wait.entry); | |
602 | kiocb->ki_flags |= IOCB_WAITQ; | |
603 | kiocb->ki_flags &= ~IOCB_NOWAIT; | |
604 | kiocb->ki_waitq = wait; | |
605 | return true; | |
606 | } | |
607 | ||
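io_rw_should_retry() only arms the page-unlock callback when the file's filesystem advertises FMODE_BUF_RASYNC. A kernel-side sketch of how a hypothetical filesystem opts in from its ->open() handler, mirroring what filesystems such as ext4, XFS and btrfs do:

```c
#include <linux/fs.h>

/* hypothetical filesystem: advertise support for callback-based buffered-read
 * retries so io_uring can use IOCB_WAITQ instead of punting to io-wq */
static int examplefs_file_open(struct inode *inode, struct file *filp)
{
	filp->f_mode |= FMODE_BUF_RASYNC;
	return generic_file_open(inode, filp);
}
```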
608 | static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter) | |
609 | { | |
610 | struct file *file = rw->kiocb.ki_filp; | |
611 | ||
612 | if (likely(file->f_op->read_iter)) | |
613 | return call_read_iter(file, &rw->kiocb, iter); | |
614 | else if (file->f_op->read) | |
615 | return loop_rw_iter(READ, rw, iter); | |
616 | else | |
617 | return -EINVAL; | |
618 | } | |
619 | ||
4e17aaab | 620 | static bool need_complete_io(struct io_kiocb *req) |
f3b44f92 JA |
621 | { |
622 | return req->flags & REQ_F_ISREG || | |
623 | S_ISBLK(file_inode(req->file)->i_mode); | |
624 | } | |
625 | ||
f3b44f92 JA |
626 | static int io_rw_init_file(struct io_kiocb *req, fmode_t mode) |
627 | { | |
f2ccb5ae | 628 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA |
629 | struct kiocb *kiocb = &rw->kiocb; |
630 | struct io_ring_ctx *ctx = req->ctx; | |
631 | struct file *file = req->file; | |
632 | int ret; | |
633 | ||
634 | if (unlikely(!file || !(file->f_mode & mode))) | |
635 | return -EBADF; | |
636 | ||
637 | if (!io_req_ffs_set(req)) | |
638 | req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT; | |
639 | ||
5264406c | 640 | kiocb->ki_flags = file->f_iocb_flags; |
f3b44f92 JA |
641 | ret = kiocb_set_rw_flags(kiocb, rw->flags); |
642 | if (unlikely(ret)) | |
643 | return ret; | |
644 | ||
645 | /* | |
646 | * If the file is marked O_NONBLOCK, still allow retry for it if it | |
647 | * supports async. Otherwise it's impossible to use O_NONBLOCK files | |
648 | * reliably. If not, or if IOCB_NOWAIT is set, don't retry. | |
649 | */ | |
650 | if ((kiocb->ki_flags & IOCB_NOWAIT) || | |
651 | ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req))) | |
652 | req->flags |= REQ_F_NOWAIT; | |
653 | ||
654 | if (ctx->flags & IORING_SETUP_IOPOLL) { | |
655 | if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll) | |
656 | return -EOPNOTSUPP; | |
657 | ||
658 | kiocb->private = NULL; | |
659 | kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE; | |
660 | kiocb->ki_complete = io_complete_rw_iopoll; | |
661 | req->iopoll_completed = 0; | |
662 | } else { | |
663 | if (kiocb->ki_flags & IOCB_HIPRI) | |
664 | return -EINVAL; | |
665 | kiocb->ki_complete = io_complete_rw; | |
666 | } | |
667 | ||
668 | return 0; | |
669 | } | |
670 | ||
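io_rw_init_file() rejects IOPOLL requests unless the I/O is direct and the driver provides ->iopoll(), so a ring created with IORING_SETUP_IOPOLL is only useful with O_DIRECT files on capable block devices. A minimal setup sketch, assuming liburing (error handling abbreviated):

```c
#define _GNU_SOURCE		/* for O_DIRECT */
#include <liburing.h>
#include <fcntl.h>
#include <errno.h>

static int iopoll_ring_setup(struct io_uring *ring, const char *path, int *fd)
{
	int ret;

	ret = io_uring_queue_init(64, ring, IORING_SETUP_IOPOLL);
	if (ret)
		return ret;

	/* buffered fds submitted to this ring complete with -EOPNOTSUPP */
	*fd = open(path, O_RDONLY | O_DIRECT);
	return *fd < 0 ? -errno : 0;
}
```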
671 | int io_read(struct io_kiocb *req, unsigned int issue_flags) | |
672 | { | |
f2ccb5ae | 673 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA |
674 | struct io_rw_state __s, *s = &__s; |
675 | struct iovec *iovec; | |
676 | struct kiocb *kiocb = &rw->kiocb; | |
677 | bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; | |
678 | struct io_async_rw *io; | |
679 | ssize_t ret, ret2; | |
680 | loff_t *ppos; | |
681 | ||
682 | if (!req_has_async_data(req)) { | |
683 | ret = io_import_iovec(READ, req, &iovec, s, issue_flags); | |
684 | if (unlikely(ret < 0)) | |
685 | return ret; | |
686 | } else { | |
687 | io = req->async_data; | |
688 | s = &io->s; | |
689 | ||
690 | /* | |
691 | * Safe and required to re-import if we're using provided | |
692 | * buffers, as we dropped the selected one before retry. | |
693 | */ | |
694 | if (io_do_buffer_select(req)) { | |
695 | ret = io_import_iovec(READ, req, &iovec, s, issue_flags); | |
696 | if (unlikely(ret < 0)) | |
697 | return ret; | |
698 | } | |
699 | ||
700 | /* | |
701 | * We come here from an earlier attempt, restore our state to | |
702 | * match in case it doesn't. It's cheap enough that we don't | |
703 | * need to make this conditional. | |
704 | */ | |
705 | iov_iter_restore(&s->iter, &s->iter_state); | |
706 | iovec = NULL; | |
707 | } | |
708 | ret = io_rw_init_file(req, FMODE_READ); | |
709 | if (unlikely(ret)) { | |
710 | kfree(iovec); | |
711 | return ret; | |
712 | } | |
713 | req->cqe.res = iov_iter_count(&s->iter); | |
714 | ||
715 | if (force_nonblock) { | |
716 | /* If the file doesn't support async, just async punt */ | |
717 | if (unlikely(!io_file_supports_nowait(req))) { | |
718 | ret = io_setup_async_rw(req, iovec, s, true); | |
719 | return ret ?: -EAGAIN; | |
720 | } | |
721 | kiocb->ki_flags |= IOCB_NOWAIT; | |
722 | } else { | |
723 | /* Ensure we clear previously set non-block flag */ | |
724 | kiocb->ki_flags &= ~IOCB_NOWAIT; | |
725 | } | |
726 | ||
727 | ppos = io_kiocb_update_pos(req); | |
728 | ||
729 | ret = rw_verify_area(READ, req->file, ppos, req->cqe.res); | |
730 | if (unlikely(ret)) { | |
731 | kfree(iovec); | |
732 | return ret; | |
733 | } | |
734 | ||
735 | ret = io_iter_do_read(rw, &s->iter); | |
736 | ||
737 | if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { | |
738 | req->flags &= ~REQ_F_REISSUE; | |
739 | /* if we can poll, just do that */ | |
740 | if (req->opcode == IORING_OP_READ && file_can_poll(req->file)) | |
741 | return -EAGAIN; | |
742 | /* IOPOLL retry should happen for io-wq threads */ | |
743 | if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) | |
744 | goto done; | |
745 | /* no retry on NONBLOCK nor RWF_NOWAIT */ | |
746 | if (req->flags & REQ_F_NOWAIT) | |
747 | goto done; | |
748 | ret = 0; | |
749 | } else if (ret == -EIOCBQUEUED) { | |
df9830d8 PB |
750 | if (iovec) |
751 | kfree(iovec); | |
752 | return IOU_ISSUE_SKIP_COMPLETE; | |
f3b44f92 | 753 | } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock || |
4e17aaab | 754 | (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) { |
f3b44f92 JA |
755 | /* read all, failed, already did sync or don't want to retry */ |
756 | goto done; | |
757 | } | |
758 | ||
759 | /* | |
760 | * Don't depend on the iter state matching what was consumed, or being | |
761 | * untouched in case of error. Restore it and we'll advance it | |
762 | * manually if we need to. | |
763 | */ | |
764 | iov_iter_restore(&s->iter, &s->iter_state); | |
765 | ||
766 | ret2 = io_setup_async_rw(req, iovec, s, true); | |
767 | if (ret2) | |
768 | return ret2; | |
769 | ||
770 | iovec = NULL; | |
771 | io = req->async_data; | |
772 | s = &io->s; | |
773 | /* | |
774 | * Now use our persistent iterator and state, if we aren't already. | |
775 | * We've restored and mapped the iter to match. | |
776 | */ | |
777 | ||
778 | do { | |
779 | /* | |
780 | * We end up here because of a partial read, either from | |
781 | * above or inside this loop. Advance the iter by the bytes | |
782 | * that were consumed. | |
783 | */ | |
784 | iov_iter_advance(&s->iter, ret); | |
785 | if (!iov_iter_count(&s->iter)) | |
786 | break; | |
787 | io->bytes_done += ret; | |
788 | iov_iter_save_state(&s->iter, &s->iter_state); | |
789 | ||
790 | /* if we can retry, do so with the callbacks armed */ | |
791 | if (!io_rw_should_retry(req)) { | |
792 | kiocb->ki_flags &= ~IOCB_WAITQ; | |
793 | return -EAGAIN; | |
794 | } | |
795 | ||
796 | /* | |
797 | * Now retry read with the IOCB_WAITQ parts set in the iocb. If | |
798 | * we get -EIOCBQUEUED, then we'll get a notification when the | |
799 | * desired page gets unlocked. We can also get a partial read | |
800 | * here, and if we do, then just retry at the new offset. | |
801 | */ | |
802 | ret = io_iter_do_read(rw, &s->iter); | |
803 | if (ret == -EIOCBQUEUED) | |
804 | return IOU_ISSUE_SKIP_COMPLETE; | |
805 | /* we got some bytes, but not all. retry. */ | |
806 | kiocb->ki_flags &= ~IOCB_WAITQ; | |
807 | iov_iter_restore(&s->iter, &s->iter_state); | |
808 | } while (ret > 0); | |
809 | done: | |
f3b44f92 JA |
810 | /* it's faster to check here than to delegate to kfree */ |
811 | if (iovec) | |
812 | kfree(iovec); | |
df9830d8 | 813 | return kiocb_done(req, ret, issue_flags); |
f3b44f92 JA |
814 | } |
815 | ||
816 | int io_write(struct io_kiocb *req, unsigned int issue_flags) | |
817 | { | |
f2ccb5ae | 818 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA |
819 | struct io_rw_state __s, *s = &__s; |
820 | struct iovec *iovec; | |
821 | struct kiocb *kiocb = &rw->kiocb; | |
822 | bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; | |
823 | ssize_t ret, ret2; | |
824 | loff_t *ppos; | |
825 | ||
826 | if (!req_has_async_data(req)) { | |
827 | ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags); | |
828 | if (unlikely(ret < 0)) | |
829 | return ret; | |
830 | } else { | |
831 | struct io_async_rw *io = req->async_data; | |
832 | ||
833 | s = &io->s; | |
834 | iov_iter_restore(&s->iter, &s->iter_state); | |
835 | iovec = NULL; | |
836 | } | |
837 | ret = io_rw_init_file(req, FMODE_WRITE); | |
838 | if (unlikely(ret)) { | |
839 | kfree(iovec); | |
840 | return ret; | |
841 | } | |
842 | req->cqe.res = iov_iter_count(&s->iter); | |
843 | ||
844 | if (force_nonblock) { | |
845 | /* If the file doesn't support async, just async punt */ | |
846 | if (unlikely(!io_file_supports_nowait(req))) | |
847 | goto copy_iov; | |
848 | ||
4e17aaab SR |
849 | /* Buffered (non-direct) IO only supports NOWAIT on block devices, or on regular files whose fs sets FMODE_BUF_WASYNC. */ |
850 | if (!(kiocb->ki_flags & IOCB_DIRECT) && | |
851 | !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) && | |
852 | (req->flags & REQ_F_ISREG)) | |
f3b44f92 JA |
853 | goto copy_iov; |
854 | ||
855 | kiocb->ki_flags |= IOCB_NOWAIT; | |
856 | } else { | |
857 | /* Ensure we clear previously set non-block flag */ | |
858 | kiocb->ki_flags &= ~IOCB_NOWAIT; | |
859 | } | |
860 | ||
861 | ppos = io_kiocb_update_pos(req); | |
862 | ||
863 | ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res); | |
df9830d8 PB |
864 | if (unlikely(ret)) { |
865 | kfree(iovec); | |
866 | return ret; | |
867 | } | |
f3b44f92 JA |
868 | |
869 | /* | |
870 | * Open-code file_start_write here to grab freeze protection, | |
871 | * which will be released by another thread in | |
872 | * io_complete_rw(). Fool lockdep by telling it the lock got | |
873 | * released so that it doesn't complain about the held lock when | |
874 | * we return to userspace. | |
875 | */ | |
876 | if (req->flags & REQ_F_ISREG) { | |
877 | sb_start_write(file_inode(req->file)->i_sb); | |
878 | __sb_writers_release(file_inode(req->file)->i_sb, | |
879 | SB_FREEZE_WRITE); | |
880 | } | |
881 | kiocb->ki_flags |= IOCB_WRITE; | |
882 | ||
883 | if (likely(req->file->f_op->write_iter)) | |
884 | ret2 = call_write_iter(req->file, kiocb, &s->iter); | |
885 | else if (req->file->f_op->write) | |
886 | ret2 = loop_rw_iter(WRITE, rw, &s->iter); | |
887 | else | |
888 | ret2 = -EINVAL; | |
889 | ||
890 | if (req->flags & REQ_F_REISSUE) { | |
891 | req->flags &= ~REQ_F_REISSUE; | |
892 | ret2 = -EAGAIN; | |
893 | } | |
894 | ||
895 | /* | |
896 | * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just | |
897 | * retry them without IOCB_NOWAIT. | |
898 | */ | |
899 | if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT)) | |
900 | ret2 = -EAGAIN; | |
901 | /* no retry on NONBLOCK nor RWF_NOWAIT */ | |
902 | if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) | |
903 | goto done; | |
904 | if (!force_nonblock || ret2 != -EAGAIN) { | |
905 | /* IOPOLL retry should happen for io-wq threads */ | |
906 | if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL)) | |
907 | goto copy_iov; | |
4e17aaab SR |
908 | |
909 | if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) { | |
910 | struct io_async_rw *rw; | |
911 | ||
1c849b48 SR |
912 | trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2, |
913 | req->cqe.res, ret2); | |
914 | ||
4e17aaab SR |
915 | /* This is a partial write. The file pos has already been |
916 | * updated, setup the async struct to complete the request | |
917 | * in the worker. Also update bytes_done to account for | |
918 | * the bytes already written. | |
919 | */ | |
920 | iov_iter_save_state(&s->iter, &s->iter_state); | |
921 | ret = io_setup_async_rw(req, iovec, s, true); | |
922 | ||
923 | rw = req->async_data; | |
924 | if (rw) | |
925 | rw->bytes_done += ret2; | |
926 | ||
e053aaf4 JA |
927 | if (kiocb->ki_flags & IOCB_WRITE) |
928 | kiocb_end_write(req); | |
4e17aaab SR |
929 | return ret ? ret : -EAGAIN; |
930 | } | |
f3b44f92 | 931 | done: |
df9830d8 | 932 | ret = kiocb_done(req, ret2, issue_flags); |
f3b44f92 JA |
933 | } else { |
934 | copy_iov: | |
935 | iov_iter_restore(&s->iter, &s->iter_state); | |
936 | ret = io_setup_async_rw(req, iovec, s, false); | |
e053aaf4 JA |
937 | if (!ret) { |
938 | if (kiocb->ki_flags & IOCB_WRITE) | |
939 | kiocb_end_write(req); | |
940 | return -EAGAIN; | |
941 | } | |
942 | return ret; | |
f3b44f92 | 943 | } |
f3b44f92 JA |
944 | /* it's reportedly faster than delegating the null check to kfree() */ |
945 | if (iovec) | |
946 | kfree(iovec); | |
947 | return ret; | |
948 | } | |
949 | ||
950 | static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx) | |
951 | { | |
46929b08 | 952 | io_commit_cqring_flush(ctx); |
f3b44f92 JA |
953 | if (ctx->flags & IORING_SETUP_SQPOLL) |
954 | io_cqring_wake(ctx); | |
955 | } | |
956 | ||
47b4c686 PB |
957 | void io_rw_fail(struct io_kiocb *req) |
958 | { | |
959 | int res; | |
960 | ||
961 | res = io_fixup_rw_res(req, req->cqe.res); | |
962 | io_req_set_res(req, res, req->cqe.flags); | |
963 | } | |
964 | ||
f3b44f92 JA |
965 | int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) |
966 | { | |
967 | struct io_wq_work_node *pos, *start, *prev; | |
968 | unsigned int poll_flags = BLK_POLL_NOSLEEP; | |
969 | DEFINE_IO_COMP_BATCH(iob); | |
970 | int nr_events = 0; | |
971 | ||
972 | /* | |
973 | * Only spin for completions if we don't have multiple devices hanging | |
974 | * off our complete list. | |
975 | */ | |
976 | if (ctx->poll_multi_queue || force_nonspin) | |
977 | poll_flags |= BLK_POLL_ONESHOT; | |
978 | ||
979 | wq_list_for_each(pos, start, &ctx->iopoll_list) { | |
980 | struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); | |
a1119fb0 | 981 | struct file *file = req->file; |
f3b44f92 JA |
982 | int ret; |
983 | ||
984 | /* | |
985 | * Move completed and retryable entries to our local lists. | |
986 | * If we find a request that requires polling, break out | |
987 | * and complete those lists first, if we have entries there. | |
988 | */ | |
989 | if (READ_ONCE(req->iopoll_completed)) | |
990 | break; | |
991 | ||
5756a3a7 | 992 | if (req->opcode == IORING_OP_URING_CMD) { |
a1119fb0 | 993 | struct io_uring_cmd *ioucmd; |
5756a3a7 | 994 | |
a1119fb0 | 995 | ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); |
de97fcb3 JA |
996 | ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob, |
997 | poll_flags); | |
a1119fb0 JA |
998 | } else { |
999 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); | |
1000 | ||
1001 | ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags); | |
1002 | } | |
f3b44f92 JA |
1003 | if (unlikely(ret < 0)) |
1004 | return ret; | |
1005 | else if (ret) | |
1006 | poll_flags |= BLK_POLL_ONESHOT; | |
1007 | ||
1008 | /* iopoll may have completed current req */ | |
1009 | if (!rq_list_empty(iob.req_list) || | |
1010 | READ_ONCE(req->iopoll_completed)) | |
1011 | break; | |
1012 | } | |
1013 | ||
1014 | if (!rq_list_empty(iob.req_list)) | |
1015 | iob.complete(&iob); | |
1016 | else if (!pos) | |
1017 | return 0; | |
1018 | ||
1019 | prev = start; | |
1020 | wq_list_for_each_resume(pos, prev) { | |
1021 | struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); | |
1022 | ||
1023 | /* order with io_complete_rw_iopoll(), e.g. ->result updates */ | |
1024 | if (!smp_load_acquire(&req->iopoll_completed)) | |
1025 | break; | |
1026 | nr_events++; | |
1027 | if (unlikely(req->flags & REQ_F_CQE_SKIP)) | |
1028 | continue; | |
1029 | ||
1030 | req->cqe.flags = io_put_kbuf(req, 0); | |
1031 | __io_fill_cqe_req(req->ctx, req); | |
1032 | } | |
1033 | ||
1034 | if (unlikely(!nr_events)) | |
1035 | return 0; | |
1036 | ||
1037 | io_commit_cqring(ctx); | |
1038 | io_cqring_ev_posted_iopoll(ctx); | |
1039 | pos = start ? start->next : ctx->iopoll_list.first; | |
1040 | wq_list_cut(&ctx->iopoll_list, prev, start); | |
1041 | io_free_batch_list(ctx, pos); | |
1042 | return nr_events; | |
1043 | } |