Commit | Line | Data |
---|---|---|
f3b44f92 JA | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/kernel.h> | |
3 | #include <linux/errno.h> | |
4 | #include <linux/fs.h> | |
5 | #include <linux/file.h> | |
6 | #include <linux/blk-mq.h> | |
7 | #include <linux/mm.h> | |
8 | #include <linux/slab.h> | |
9 | #include <linux/fsnotify.h> | |
10 | #include <linux/poll.h> | |
11 | #include <linux/nospec.h> | |
12 | #include <linux/compat.h> | |
13 | #include <linux/io_uring.h> | |
14 | ||
15 | #include <uapi/linux/io_uring.h> | |
16 | ||
f3b44f92 JA | 17 | #include "io_uring.h"
18 | #include "opdef.h" | |
19 | #include "kbuf.h" | |
20 | #include "rsrc.h" | |
21 | #include "rw.h" | |
22 | ||
23 | struct io_rw { | |
24 | /* NOTE: kiocb has the file as the first member, so don't do it here */ | |
25 | struct kiocb kiocb; | |
26 | u64 addr; | |
27 | u32 len; | |
28 | rwf_t flags; | |
29 | }; | |
30 | ||
31 | static inline bool io_file_supports_nowait(struct io_kiocb *req) | |
32 | { | |
33 | return req->flags & REQ_F_SUPPORT_NOWAIT; | |
34 | } | |
35 | ||
36 | int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) | |
37 | { | |
f2ccb5ae | 38 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA | 39 | unsigned ioprio;
40 | int ret; | |
41 | ||
42 | rw->kiocb.ki_pos = READ_ONCE(sqe->off); | |
43 | /* used for fixed read/write too - just read unconditionally */ | |
44 | req->buf_index = READ_ONCE(sqe->buf_index); | |
45 | ||
46 | if (req->opcode == IORING_OP_READ_FIXED || | |
47 | req->opcode == IORING_OP_WRITE_FIXED) { | |
48 | struct io_ring_ctx *ctx = req->ctx; | |
49 | u16 index; | |
50 | ||
51 | if (unlikely(req->buf_index >= ctx->nr_user_bufs)) | |
52 | return -EFAULT; | |
53 | index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); | |
54 | req->imu = ctx->user_bufs[index]; | |
55 | io_req_set_rsrc_node(req, ctx, 0); | |
56 | } | |
57 | ||
58 | ioprio = READ_ONCE(sqe->ioprio); | |
59 | if (ioprio) { | |
60 | ret = ioprio_check_cap(ioprio); | |
61 | if (ret) | |
62 | return ret; | |
63 | ||
64 | rw->kiocb.ki_ioprio = ioprio; | |
65 | } else { | |
66 | rw->kiocb.ki_ioprio = get_current_ioprio(); | |
67 | } | |
68 | ||
69 | rw->addr = READ_ONCE(sqe->addr); | |
70 | rw->len = READ_ONCE(sqe->len); | |
71 | rw->flags = READ_ONCE(sqe->rw_flags); | |
72 | return 0; | |
73 | } | |
74 | ||
75 | void io_readv_writev_cleanup(struct io_kiocb *req) | |
76 | { | |
77 | struct io_async_rw *io = req->async_data; | |
78 | ||
79 | kfree(io->free_iovec); | |
80 | } | |
81 | ||
82 | static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) | |
83 | { | |
84 | switch (ret) { | |
85 | case -EIOCBQUEUED: | |
86 | break; | |
87 | case -ERESTARTSYS: | |
88 | case -ERESTARTNOINTR: | |
89 | case -ERESTARTNOHAND: | |
90 | case -ERESTART_RESTARTBLOCK: | |
91 | /* | |
92 | * We can't just restart the syscall, since previously | |
93 | * submitted sqes may already be in progress. Just fail this | |
94 | * IO with EINTR. | |
95 | */ | |
96 | ret = -EINTR; | |
97 | fallthrough; | |
98 | default: | |
99 | kiocb->ki_complete(kiocb, ret); | |
100 | } | |
101 | } | |
102 | ||
103 | static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) | |
104 | { | |
f2ccb5ae | 105 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA | 106 |
107 | if (rw->kiocb.ki_pos != -1) | |
108 | return &rw->kiocb.ki_pos; | |
109 | ||
110 | if (!(req->file->f_mode & FMODE_STREAM)) { | |
111 | req->flags |= REQ_F_CUR_POS; | |
112 | rw->kiocb.ki_pos = req->file->f_pos; | |
113 | return &rw->kiocb.ki_pos; | |
114 | } | |
115 | ||
116 | rw->kiocb.ki_pos = 0; | |
117 | return NULL; | |
118 | } | |
119 | ||
120 | static void io_req_task_queue_reissue(struct io_kiocb *req) | |
121 | { | |
122 | req->io_task_work.func = io_queue_iowq; | |
123 | io_req_task_work_add(req); | |
124 | } | |
125 | ||
126 | #ifdef CONFIG_BLOCK | |
127 | static bool io_resubmit_prep(struct io_kiocb *req) | |
128 | { | |
129 | struct io_async_rw *io = req->async_data; | |
130 | ||
131 | if (!req_has_async_data(req)) | |
132 | return !io_req_prep_async(req); | |
133 | iov_iter_restore(&io->s.iter, &io->s.iter_state); | |
134 | return true; | |
135 | } | |
136 | ||
137 | static bool io_rw_should_reissue(struct io_kiocb *req) | |
138 | { | |
139 | umode_t mode = file_inode(req->file)->i_mode; | |
140 | struct io_ring_ctx *ctx = req->ctx; | |
141 | ||
142 | if (!S_ISBLK(mode) && !S_ISREG(mode)) | |
143 | return false; | |
144 | if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && | |
145 | !(ctx->flags & IORING_SETUP_IOPOLL))) | |
146 | return false; | |
147 | /* | |
148 | * If ref is dying, we might be running poll reap from the exit work. | |
149 | * Don't attempt to reissue from that path, just let it fail with | |
150 | * -EAGAIN. | |
151 | */ | |
152 | if (percpu_ref_is_dying(&ctx->refs)) | |
153 | return false; | |
154 | /* | |
155 | * Play it safe and assume not safe to re-import and reissue if we're | |
156 | * not in the original thread group (or not in task context). | |
157 | */ | |
158 | if (!same_thread_group(req->task, current) || !in_task()) | |
159 | return false; | |
160 | return true; | |
161 | } | |
162 | #else | |
163 | static bool io_resubmit_prep(struct io_kiocb *req) | |
164 | { | |
165 | return false; | |
166 | } | |
167 | static bool io_rw_should_reissue(struct io_kiocb *req) | |
168 | { | |
169 | return false; | |
170 | } | |
171 | #endif | |
172 | ||
173 | static void kiocb_end_write(struct io_kiocb *req) | |
174 | { | |
175 | /* | |
176 | * Tell lockdep we inherited freeze protection from submission | |
177 | * thread. | |
178 | */ | |
179 | if (req->flags & REQ_F_ISREG) { | |
180 | struct super_block *sb = file_inode(req->file)->i_sb; | |
181 | ||
182 | __sb_writers_acquired(sb, SB_FREEZE_WRITE); | |
183 | sb_end_write(sb); | |
184 | } | |
185 | } | |
186 | ||
187 | static bool __io_complete_rw_common(struct io_kiocb *req, long res) | |
188 | { | |
f2ccb5ae | 189 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA | 190 |
191 | if (rw->kiocb.ki_flags & IOCB_WRITE) { | |
192 | kiocb_end_write(req); | |
193 | fsnotify_modify(req->file); | |
194 | } else { | |
195 | fsnotify_access(req->file); | |
196 | } | |
197 | if (unlikely(res != req->cqe.res)) { | |
198 | if ((res == -EAGAIN || res == -EOPNOTSUPP) && | |
199 | io_rw_should_reissue(req)) { | |
200 | req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO; | |
201 | return true; | |
202 | } | |
203 | req_set_fail(req); | |
204 | req->cqe.res = res; | |
205 | } | |
206 | return false; | |
207 | } | |
208 | ||
62bb0647 | 209 | static inline int io_fixup_rw_res(struct io_kiocb *req, long res) |
4d9cb92c PB | 210 | {
211 | struct io_async_rw *io = req->async_data; | |
212 | ||
213 | /* add previously done IO, if any */ | |
214 | if (req_has_async_data(req) && io->bytes_done > 0) { | |
215 | if (res < 0) | |
216 | res = io->bytes_done; | |
217 | else | |
218 | res += io->bytes_done; | |
219 | } | |
220 | return res; | |
221 | } | |
222 | ||
f3b44f92 JA | 223 | static void io_complete_rw(struct kiocb *kiocb, long res)
224 | { | |
225 | struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb); | |
226 | struct io_kiocb *req = cmd_to_io_kiocb(rw); | |
227 | ||
228 | if (__io_complete_rw_common(req, res)) | |
229 | return; | |
4d9cb92c | 230 | io_req_set_res(req, io_fixup_rw_res(req, res), 0); |
f3b44f92 | 231 | req->io_task_work.func = io_req_task_complete; |
ed5ccb3b | 232 | io_req_task_work_add(req); |
f3b44f92 JA | 233 | }
234 | ||
235 | static void io_complete_rw_iopoll(struct kiocb *kiocb, long res) | |
236 | { | |
237 | struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb); | |
238 | struct io_kiocb *req = cmd_to_io_kiocb(rw); | |
239 | ||
240 | if (kiocb->ki_flags & IOCB_WRITE) | |
241 | kiocb_end_write(req); | |
242 | if (unlikely(res != req->cqe.res)) { | |
243 | if (res == -EAGAIN && io_rw_should_reissue(req)) { | |
244 | req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO; | |
245 | return; | |
246 | } | |
247 | req->cqe.res = res; | |
248 | } | |
249 | ||
250 | /* order with io_iopoll_complete() checking ->iopoll_completed */ | |
251 | smp_store_release(&req->iopoll_completed, 1); | |
252 | } | |
253 | ||
df9830d8 | 254 | static int kiocb_done(struct io_kiocb *req, ssize_t ret, |
f3b44f92 JA | 255 | unsigned int issue_flags)
256 | { | |
f2ccb5ae | 257 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
4d9cb92c | 258 | unsigned final_ret = io_fixup_rw_res(req, ret); |
f3b44f92 JA | 259 |
260 | if (req->flags & REQ_F_CUR_POS) | |
261 | req->file->f_pos = rw->kiocb.ki_pos; | |
df9830d8 PB | 262 | if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
263 | if (!__io_complete_rw_common(req, ret)) { | |
4d9cb92c | 264 | io_req_set_res(req, final_ret, |
df9830d8 PB | 265 | io_put_kbuf(req, issue_flags));
266 | return IOU_OK; | |
267 | } | |
268 | } else { | |
f3b44f92 | 269 | io_rw_done(&rw->kiocb, ret); |
df9830d8 | 270 | } |
f3b44f92 JA | 271 |
272 | if (req->flags & REQ_F_REISSUE) { | |
273 | req->flags &= ~REQ_F_REISSUE; | |
274 | if (io_resubmit_prep(req)) | |
275 | io_req_task_queue_reissue(req); | |
276 | else | |
4d9cb92c | 277 | io_req_task_queue_fail(req, final_ret); |
f3b44f92 | 278 | } |
df9830d8 | 279 | return IOU_ISSUE_SKIP_COMPLETE; |
f3b44f92 JA | 280 | }
281 | ||
f3b44f92 JA | 282 | #ifdef CONFIG_COMPAT
283 | static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, | |
284 | unsigned int issue_flags) | |
285 | { | |
f2ccb5ae | 286 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA | 287 | struct compat_iovec __user *uiov;
288 | compat_ssize_t clen; | |
289 | void __user *buf; | |
290 | size_t len; | |
291 | ||
292 | uiov = u64_to_user_ptr(rw->addr); | |
293 | if (!access_ok(uiov, sizeof(*uiov))) | |
294 | return -EFAULT; | |
295 | if (__get_user(clen, &uiov->iov_len)) | |
296 | return -EFAULT; | |
297 | if (clen < 0) | |
298 | return -EINVAL; | |
299 | ||
300 | len = clen; | |
301 | buf = io_buffer_select(req, &len, issue_flags); | |
302 | if (!buf) | |
303 | return -ENOBUFS; | |
304 | rw->addr = (unsigned long) buf; | |
305 | iov[0].iov_base = buf; | |
306 | rw->len = iov[0].iov_len = (compat_size_t) len; | |
307 | return 0; | |
308 | } | |
309 | #endif | |
310 | ||
311 | static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, | |
312 | unsigned int issue_flags) | |
313 | { | |
f2ccb5ae | 314 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA | 315 | struct iovec __user *uiov = u64_to_user_ptr(rw->addr);
316 | void __user *buf; | |
317 | ssize_t len; | |
318 | ||
319 | if (copy_from_user(iov, uiov, sizeof(*uiov))) | |
320 | return -EFAULT; | |
321 | ||
322 | len = iov[0].iov_len; | |
323 | if (len < 0) | |
324 | return -EINVAL; | |
325 | buf = io_buffer_select(req, &len, issue_flags); | |
326 | if (!buf) | |
327 | return -ENOBUFS; | |
328 | rw->addr = (unsigned long) buf; | |
329 | iov[0].iov_base = buf; | |
330 | rw->len = iov[0].iov_len = len; | |
331 | return 0; | |
332 | } | |
333 | ||
334 | static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, | |
335 | unsigned int issue_flags) | |
336 | { | |
f2ccb5ae | 337 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA | 338 |
339 | if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) { | |
340 | iov[0].iov_base = u64_to_user_ptr(rw->addr); | |
341 | iov[0].iov_len = rw->len; | |
342 | return 0; | |
343 | } | |
344 | if (rw->len != 1) | |
345 | return -EINVAL; | |
346 | ||
347 | #ifdef CONFIG_COMPAT | |
348 | if (req->ctx->compat) | |
349 | return io_compat_import(req, iov, issue_flags); | |
350 | #endif | |
351 | ||
352 | return __io_iov_buffer_select(req, iov, issue_flags); | |
353 | } | |
354 | ||
355 | static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req, | |
356 | struct io_rw_state *s, | |
357 | unsigned int issue_flags) | |
358 | { | |
f2ccb5ae | 359 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA | 360 | struct iov_iter *iter = &s->iter;
361 | u8 opcode = req->opcode; | |
362 | struct iovec *iovec; | |
363 | void __user *buf; | |
364 | size_t sqe_len; | |
365 | ssize_t ret; | |
366 | ||
367 | if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { | |
f337a84d | 368 | ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len); |
f3b44f92 JA | 369 | if (ret)
370 | return ERR_PTR(ret); | |
371 | return NULL; | |
372 | } | |
373 | ||
374 | buf = u64_to_user_ptr(rw->addr); | |
375 | sqe_len = rw->len; | |
376 | ||
377 | if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) { | |
378 | if (io_do_buffer_select(req)) { | |
379 | buf = io_buffer_select(req, &sqe_len, issue_flags); | |
380 | if (!buf) | |
381 | return ERR_PTR(-ENOBUFS); | |
382 | rw->addr = (unsigned long) buf; | |
383 | rw->len = sqe_len; | |
384 | } | |
385 | ||
386 | ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter); | |
387 | if (ret) | |
388 | return ERR_PTR(ret); | |
389 | return NULL; | |
390 | } | |
391 | ||
392 | iovec = s->fast_iov; | |
393 | if (req->flags & REQ_F_BUFFER_SELECT) { | |
394 | ret = io_iov_buffer_select(req, iovec, issue_flags); | |
395 | if (ret) | |
396 | return ERR_PTR(ret); | |
397 | iov_iter_init(iter, ddir, iovec, 1, iovec->iov_len); | |
398 | return NULL; | |
399 | } | |
400 | ||
401 | ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter, | |
402 | req->ctx->compat); | |
403 | if (unlikely(ret < 0)) | |
404 | return ERR_PTR(ret); | |
405 | return iovec; | |
406 | } | |
407 | ||
408 | static inline int io_import_iovec(int rw, struct io_kiocb *req, | |
409 | struct iovec **iovec, struct io_rw_state *s, | |
410 | unsigned int issue_flags) | |
411 | { | |
412 | *iovec = __io_import_iovec(rw, req, s, issue_flags); | |
413 | if (unlikely(IS_ERR(*iovec))) | |
414 | return PTR_ERR(*iovec); | |
415 | ||
416 | iov_iter_save_state(&s->iter, &s->iter_state); | |
417 | return 0; | |
418 | } | |
419 | ||
420 | static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb) | |
421 | { | |
422 | return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos; | |
423 | } | |
424 | ||
425 | /* | |
426 | * For files that don't have ->read_iter() and ->write_iter(), handle them | |
427 | * by looping over ->read() or ->write() manually. | |
428 | */ | |
429 | static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter) | |
430 | { | |
431 | struct kiocb *kiocb = &rw->kiocb; | |
432 | struct file *file = kiocb->ki_filp; | |
433 | ssize_t ret = 0; | |
434 | loff_t *ppos; | |
435 | ||
436 | /* | |
437 | * Don't support polled IO through this interface, and we can't | |
438 | * support non-blocking either. For the latter, this just causes | |
439 | * the kiocb to be handled from an async context. | |
440 | */ | |
441 | if (kiocb->ki_flags & IOCB_HIPRI) | |
442 | return -EOPNOTSUPP; | |
443 | if ((kiocb->ki_flags & IOCB_NOWAIT) && | |
444 | !(kiocb->ki_filp->f_flags & O_NONBLOCK)) | |
445 | return -EAGAIN; | |
446 | ||
447 | ppos = io_kiocb_ppos(kiocb); | |
448 | ||
449 | while (iov_iter_count(iter)) { | |
450 | struct iovec iovec; | |
451 | ssize_t nr; | |
452 | ||
453 | if (!iov_iter_is_bvec(iter)) { | |
454 | iovec = iov_iter_iovec(iter); | |
455 | } else { | |
456 | iovec.iov_base = u64_to_user_ptr(rw->addr); | |
457 | iovec.iov_len = rw->len; | |
458 | } | |
459 | ||
460 | if (ddir == READ) { | |
461 | nr = file->f_op->read(file, iovec.iov_base, | |
462 | iovec.iov_len, ppos); | |
463 | } else { | |
464 | nr = file->f_op->write(file, iovec.iov_base, | |
465 | iovec.iov_len, ppos); | |
466 | } | |
467 | ||
468 | if (nr < 0) { | |
469 | if (!ret) | |
470 | ret = nr; | |
471 | break; | |
472 | } | |
473 | ret += nr; | |
474 | if (!iov_iter_is_bvec(iter)) { | |
475 | iov_iter_advance(iter, nr); | |
476 | } else { | |
477 | rw->addr += nr; | |
478 | rw->len -= nr; | |
479 | if (!rw->len) | |
480 | break; | |
481 | } | |
482 | if (nr != iovec.iov_len) | |
483 | break; | |
484 | } | |
485 | ||
486 | return ret; | |
487 | } | |
488 | ||
489 | static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, | |
490 | const struct iovec *fast_iov, struct iov_iter *iter) | |
491 | { | |
492 | struct io_async_rw *io = req->async_data; | |
493 | ||
494 | memcpy(&io->s.iter, iter, sizeof(*iter)); | |
495 | io->free_iovec = iovec; | |
496 | io->bytes_done = 0; | |
497 | /* can only be fixed buffers, no need to do anything */ | |
498 | if (iov_iter_is_bvec(iter)) | |
499 | return; | |
500 | if (!iovec) { | |
501 | unsigned iov_off = 0; | |
502 | ||
503 | io->s.iter.iov = io->s.fast_iov; | |
504 | if (iter->iov != fast_iov) { | |
505 | iov_off = iter->iov - fast_iov; | |
506 | io->s.iter.iov += iov_off; | |
507 | } | |
508 | if (io->s.fast_iov != fast_iov) | |
509 | memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off, | |
510 | sizeof(struct iovec) * iter->nr_segs); | |
511 | } else { | |
512 | req->flags |= REQ_F_NEED_CLEANUP; | |
513 | } | |
514 | } | |
515 | ||
516 | static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, | |
517 | struct io_rw_state *s, bool force) | |
518 | { | |
519 | if (!force && !io_op_defs[req->opcode].prep_async) | |
520 | return 0; | |
521 | if (!req_has_async_data(req)) { | |
522 | struct io_async_rw *iorw; | |
523 | ||
524 | if (io_alloc_async_data(req)) { | |
525 | kfree(iovec); | |
526 | return -ENOMEM; | |
527 | } | |
528 | ||
529 | io_req_map_rw(req, iovec, s->fast_iov, &s->iter); | |
530 | iorw = req->async_data; | |
531 | /* we've copied and mapped the iter, ensure state is saved */ | |
532 | iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state); | |
533 | } | |
534 | return 0; | |
535 | } | |
536 | ||
537 | static inline int io_rw_prep_async(struct io_kiocb *req, int rw) | |
538 | { | |
539 | struct io_async_rw *iorw = req->async_data; | |
540 | struct iovec *iov; | |
541 | int ret; | |
542 | ||
543 | /* submission path, ->uring_lock should already be taken */ | |
544 | ret = io_import_iovec(rw, req, &iov, &iorw->s, 0); | |
545 | if (unlikely(ret < 0)) | |
546 | return ret; | |
547 | ||
548 | iorw->bytes_done = 0; | |
549 | iorw->free_iovec = iov; | |
550 | if (iov) | |
551 | req->flags |= REQ_F_NEED_CLEANUP; | |
552 | return 0; | |
553 | } | |
554 | ||
555 | int io_readv_prep_async(struct io_kiocb *req) | |
556 | { | |
557 | return io_rw_prep_async(req, READ); | |
558 | } | |
559 | ||
560 | int io_writev_prep_async(struct io_kiocb *req) | |
561 | { | |
562 | return io_rw_prep_async(req, WRITE); | |
563 | } | |
564 | ||
565 | /* | |
566 | * This is our waitqueue callback handler, registered through __folio_lock_async() | |
567 | * when we initially tried to do the IO with the iocb and armed our waitqueue. | |
568 | * This gets called when the page is unlocked, and we generally expect that to | |
569 | * happen when the page IO is completed and the page is now uptodate. This will | |
570 | * queue a task_work based retry of the operation, attempting to copy the data | |
571 | * again. If the latter fails because the page was NOT uptodate, then we will | |
572 | * do a thread based blocking retry of the operation. That's the unexpected | |
573 | * slow path. | |
574 | */ | |
575 | static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, | |
576 | int sync, void *arg) | |
577 | { | |
578 | struct wait_page_queue *wpq; | |
579 | struct io_kiocb *req = wait->private; | |
f2ccb5ae | 580 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA | 581 | struct wait_page_key *key = arg;
582 | ||
583 | wpq = container_of(wait, struct wait_page_queue, wait); | |
584 | ||
585 | if (!wake_page_match(wpq, key)) | |
586 | return 0; | |
587 | ||
588 | rw->kiocb.ki_flags &= ~IOCB_WAITQ; | |
589 | list_del_init(&wait->entry); | |
590 | io_req_task_queue(req); | |
591 | return 1; | |
592 | } | |
593 | ||
594 | /* | |
595 | * This controls whether a given IO request should be armed for async page | |
596 | * based retry. If we return false here, the request is handed to the async | |
597 | * worker threads for retry. If we're doing buffered reads on a regular file, | |
598 | * we prepare a private wait_page_queue entry and retry the operation. This | |
599 | * will either succeed because the page is now uptodate and unlocked, or it | |
600 | * will register a callback when the page is unlocked at IO completion. Through | |
601 | * that callback, io_uring uses task_work to setup a retry of the operation. | |
602 | * That retry will attempt the buffered read again. The retry will generally | |
603 | * succeed, or in rare cases where it fails, we then fall back to using the | |
604 | * async worker threads for a blocking retry. | |
605 | */ | |
606 | static bool io_rw_should_retry(struct io_kiocb *req) | |
607 | { | |
608 | struct io_async_rw *io = req->async_data; | |
609 | struct wait_page_queue *wait = &io->wpq; | |
f2ccb5ae | 610 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA | 611 | struct kiocb *kiocb = &rw->kiocb;
612 | ||
613 | /* never retry for NOWAIT, we just complete with -EAGAIN */ | |
614 | if (req->flags & REQ_F_NOWAIT) | |
615 | return false; | |
616 | ||
617 | /* Only for buffered IO */ | |
618 | if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI)) | |
619 | return false; | |
620 | ||
621 | /* | |
622 | * just use poll if we can, and don't attempt if the fs doesn't | |
623 | * support callback based unlocks | |
624 | */ | |
625 | if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC)) | |
626 | return false; | |
627 | ||
628 | wait->wait.func = io_async_buf_func; | |
629 | wait->wait.private = req; | |
630 | wait->wait.flags = 0; | |
631 | INIT_LIST_HEAD(&wait->wait.entry); | |
632 | kiocb->ki_flags |= IOCB_WAITQ; | |
633 | kiocb->ki_flags &= ~IOCB_NOWAIT; | |
634 | kiocb->ki_waitq = wait; | |
635 | return true; | |
636 | } | |
637 | ||
638 | static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter) | |
639 | { | |
640 | struct file *file = rw->kiocb.ki_filp; | |
641 | ||
642 | if (likely(file->f_op->read_iter)) | |
643 | return call_read_iter(file, &rw->kiocb, iter); | |
644 | else if (file->f_op->read) | |
645 | return loop_rw_iter(READ, rw, iter); | |
646 | else | |
647 | return -EINVAL; | |
648 | } | |
649 | ||
4e17aaab | 650 | static bool need_complete_io(struct io_kiocb *req) |
f3b44f92 JA | 651 | {
652 | return req->flags & REQ_F_ISREG || | |
653 | S_ISBLK(file_inode(req->file)->i_mode); | |
654 | } | |
655 | ||
f3b44f92 JA | 656 | static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
657 | { | |
f2ccb5ae | 658 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA | 659 | struct kiocb *kiocb = &rw->kiocb;
660 | struct io_ring_ctx *ctx = req->ctx; | |
661 | struct file *file = req->file; | |
662 | int ret; | |
663 | ||
664 | if (unlikely(!file || !(file->f_mode & mode))) | |
665 | return -EBADF; | |
666 | ||
667 | if (!io_req_ffs_set(req)) | |
668 | req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT; | |
669 | ||
5264406c | 670 | kiocb->ki_flags = file->f_iocb_flags; |
f3b44f92 JA | 671 | ret = kiocb_set_rw_flags(kiocb, rw->flags);
672 | if (unlikely(ret)) | |
673 | return ret; | |
674 | ||
675 | /* | |
676 | * If the file is marked O_NONBLOCK, still allow retry for it if it | |
677 | * supports async. Otherwise it's impossible to use O_NONBLOCK files | |
678 | * reliably. If not, or if IOCB_NOWAIT is set, don't retry. | |
679 | */ | |
680 | if ((kiocb->ki_flags & IOCB_NOWAIT) || | |
681 | ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req))) | |
682 | req->flags |= REQ_F_NOWAIT; | |
683 | ||
684 | if (ctx->flags & IORING_SETUP_IOPOLL) { | |
685 | if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll) | |
686 | return -EOPNOTSUPP; | |
687 | ||
688 | kiocb->private = NULL; | |
689 | kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE; | |
690 | kiocb->ki_complete = io_complete_rw_iopoll; | |
691 | req->iopoll_completed = 0; | |
692 | } else { | |
693 | if (kiocb->ki_flags & IOCB_HIPRI) | |
694 | return -EINVAL; | |
695 | kiocb->ki_complete = io_complete_rw; | |
696 | } | |
697 | ||
698 | return 0; | |
699 | } | |
700 | ||
701 | int io_read(struct io_kiocb *req, unsigned int issue_flags) | |
702 | { | |
f2ccb5ae | 703 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA | 704 | struct io_rw_state __s, *s = &__s;
705 | struct iovec *iovec; | |
706 | struct kiocb *kiocb = &rw->kiocb; | |
707 | bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; | |
708 | struct io_async_rw *io; | |
709 | ssize_t ret, ret2; | |
710 | loff_t *ppos; | |
711 | ||
712 | if (!req_has_async_data(req)) { | |
713 | ret = io_import_iovec(READ, req, &iovec, s, issue_flags); | |
714 | if (unlikely(ret < 0)) | |
715 | return ret; | |
716 | } else { | |
717 | io = req->async_data; | |
718 | s = &io->s; | |
719 | ||
720 | /* | |
721 | * Safe and required to re-import if we're using provided | |
722 | * buffers, as we dropped the selected one before retry. | |
723 | */ | |
724 | if (io_do_buffer_select(req)) { | |
725 | ret = io_import_iovec(READ, req, &iovec, s, issue_flags); | |
726 | if (unlikely(ret < 0)) | |
727 | return ret; | |
728 | } | |
729 | ||
730 | /* | |
731 | * We come here from an earlier attempt, restore our state to | |
732 | * match in case it doesn't. It's cheap enough that we don't | |
733 | * need to make this conditional. | |
734 | */ | |
735 | iov_iter_restore(&s->iter, &s->iter_state); | |
736 | iovec = NULL; | |
737 | } | |
738 | ret = io_rw_init_file(req, FMODE_READ); | |
739 | if (unlikely(ret)) { | |
740 | kfree(iovec); | |
741 | return ret; | |
742 | } | |
743 | req->cqe.res = iov_iter_count(&s->iter); | |
744 | ||
745 | if (force_nonblock) { | |
746 | /* If the file doesn't support async, just async punt */ | |
747 | if (unlikely(!io_file_supports_nowait(req))) { | |
748 | ret = io_setup_async_rw(req, iovec, s, true); | |
749 | return ret ?: -EAGAIN; | |
750 | } | |
751 | kiocb->ki_flags |= IOCB_NOWAIT; | |
752 | } else { | |
753 | /* Ensure we clear previously set non-block flag */ | |
754 | kiocb->ki_flags &= ~IOCB_NOWAIT; | |
755 | } | |
756 | ||
757 | ppos = io_kiocb_update_pos(req); | |
758 | ||
759 | ret = rw_verify_area(READ, req->file, ppos, req->cqe.res); | |
760 | if (unlikely(ret)) { | |
761 | kfree(iovec); | |
762 | return ret; | |
763 | } | |
764 | ||
765 | ret = io_iter_do_read(rw, &s->iter); | |
766 | ||
767 | if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { | |
768 | req->flags &= ~REQ_F_REISSUE; | |
769 | /* if we can poll, just do that */ | |
770 | if (req->opcode == IORING_OP_READ && file_can_poll(req->file)) | |
771 | return -EAGAIN; | |
772 | /* IOPOLL retry should happen for io-wq threads */ | |
773 | if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) | |
774 | goto done; | |
775 | /* no retry on NONBLOCK nor RWF_NOWAIT */ | |
776 | if (req->flags & REQ_F_NOWAIT) | |
777 | goto done; | |
778 | ret = 0; | |
779 | } else if (ret == -EIOCBQUEUED) { | |
df9830d8 PB | 780 | if (iovec)
781 | kfree(iovec); | |
782 | return IOU_ISSUE_SKIP_COMPLETE; | |
f3b44f92 | 783 | } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock || |
4e17aaab | 784 | (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) { |
f3b44f92 JA | 785 | /* read all, failed, already did sync or don't want to retry */
786 | goto done; | |
787 | } | |
788 | ||
789 | /* | |
790 | * Don't depend on the iter state matching what was consumed, or being | |
791 | * untouched in case of error. Restore it and we'll advance it | |
792 | * manually if we need to. | |
793 | */ | |
794 | iov_iter_restore(&s->iter, &s->iter_state); | |
795 | ||
796 | ret2 = io_setup_async_rw(req, iovec, s, true); | |
797 | if (ret2) | |
798 | return ret2; | |
799 | ||
800 | iovec = NULL; | |
801 | io = req->async_data; | |
802 | s = &io->s; | |
803 | /* | |
804 | * Now use our persistent iterator and state, if we aren't already. | |
805 | * We've restored and mapped the iter to match. | |
806 | */ | |
807 | ||
808 | do { | |
809 | /* | |
810 | * We end up here because of a partial read, either from | |
811 | * above or inside this loop. Advance the iter by the bytes | |
812 | * that were consumed. | |
813 | */ | |
814 | iov_iter_advance(&s->iter, ret); | |
815 | if (!iov_iter_count(&s->iter)) | |
816 | break; | |
817 | io->bytes_done += ret; | |
818 | iov_iter_save_state(&s->iter, &s->iter_state); | |
819 | ||
820 | /* if we can retry, do so with the callbacks armed */ | |
821 | if (!io_rw_should_retry(req)) { | |
822 | kiocb->ki_flags &= ~IOCB_WAITQ; | |
823 | return -EAGAIN; | |
824 | } | |
825 | ||
826 | /* | |
827 | * Now retry read with the IOCB_WAITQ parts set in the iocb. If | |
828 | * we get -EIOCBQUEUED, then we'll get a notification when the | |
829 | * desired page gets unlocked. We can also get a partial read | |
830 | * here, and if we do, then just retry at the new offset. | |
831 | */ | |
832 | ret = io_iter_do_read(rw, &s->iter); | |
833 | if (ret == -EIOCBQUEUED) | |
834 | return IOU_ISSUE_SKIP_COMPLETE; | |
835 | /* we got some bytes, but not all. retry. */ | |
836 | kiocb->ki_flags &= ~IOCB_WAITQ; | |
837 | iov_iter_restore(&s->iter, &s->iter_state); | |
838 | } while (ret > 0); | |
839 | done: | |
f3b44f92 JA | 840 | /* it's faster to check here than to delegate to kfree */
841 | if (iovec) | |
842 | kfree(iovec); | |
df9830d8 | 843 | return kiocb_done(req, ret, issue_flags); |
f3b44f92 JA | 844 | }
845 | ||
846 | int io_write(struct io_kiocb *req, unsigned int issue_flags) | |
847 | { | |
f2ccb5ae | 848 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA | 849 | struct io_rw_state __s, *s = &__s;
850 | struct iovec *iovec; | |
851 | struct kiocb *kiocb = &rw->kiocb; | |
852 | bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; | |
853 | ssize_t ret, ret2; | |
854 | loff_t *ppos; | |
855 | ||
856 | if (!req_has_async_data(req)) { | |
857 | ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags); | |
858 | if (unlikely(ret < 0)) | |
859 | return ret; | |
860 | } else { | |
861 | struct io_async_rw *io = req->async_data; | |
862 | ||
863 | s = &io->s; | |
864 | iov_iter_restore(&s->iter, &s->iter_state); | |
865 | iovec = NULL; | |
866 | } | |
867 | ret = io_rw_init_file(req, FMODE_WRITE); | |
868 | if (unlikely(ret)) { | |
869 | kfree(iovec); | |
870 | return ret; | |
871 | } | |
872 | req->cqe.res = iov_iter_count(&s->iter); | |
873 | ||
874 | if (force_nonblock) { | |
875 | /* If the file doesn't support async, just async punt */ | |
876 | if (unlikely(!io_file_supports_nowait(req))) | |
877 | goto copy_iov; | |
878 | ||
4e17aaab SR | 879 | /* File path supports NOWAIT for non-direct_IO only for block devices. */ |
880 | if (!(kiocb->ki_flags & IOCB_DIRECT) && | |
881 | !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) && | |
882 | (req->flags & REQ_F_ISREG)) | |
f3b44f92 JA | 883 | goto copy_iov;
884 | ||
885 | kiocb->ki_flags |= IOCB_NOWAIT; | |
886 | } else { | |
887 | /* Ensure we clear previously set non-block flag */ | |
888 | kiocb->ki_flags &= ~IOCB_NOWAIT; | |
889 | } | |
890 | ||
891 | ppos = io_kiocb_update_pos(req); | |
892 | ||
893 | ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res); | |
df9830d8 PB | 894 | if (unlikely(ret)) {
895 | kfree(iovec); | |
896 | return ret; | |
897 | } | |
f3b44f92 JA | 898 |
899 | /* | |
900 | * Open-code file_start_write here to grab freeze protection, | |
901 | * which will be released by another thread in | |
902 | * io_complete_rw(). Fool lockdep by telling it the lock got | |
903 | * released so that it doesn't complain about the held lock when | |
904 | * we return to userspace. | |
905 | */ | |
906 | if (req->flags & REQ_F_ISREG) { | |
907 | sb_start_write(file_inode(req->file)->i_sb); | |
908 | __sb_writers_release(file_inode(req->file)->i_sb, | |
909 | SB_FREEZE_WRITE); | |
910 | } | |
911 | kiocb->ki_flags |= IOCB_WRITE; | |
912 | ||
913 | if (likely(req->file->f_op->write_iter)) | |
914 | ret2 = call_write_iter(req->file, kiocb, &s->iter); | |
915 | else if (req->file->f_op->write) | |
916 | ret2 = loop_rw_iter(WRITE, rw, &s->iter); | |
917 | else | |
918 | ret2 = -EINVAL; | |
919 | ||
920 | if (req->flags & REQ_F_REISSUE) { | |
921 | req->flags &= ~REQ_F_REISSUE; | |
922 | ret2 = -EAGAIN; | |
923 | } | |
924 | ||
925 | /* | |
926 | * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just | |
927 | * retry them without IOCB_NOWAIT. | |
928 | */ | |
929 | if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT)) | |
930 | ret2 = -EAGAIN; | |
931 | /* no retry on NONBLOCK nor RWF_NOWAIT */ | |
932 | if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) | |
933 | goto done; | |
934 | if (!force_nonblock || ret2 != -EAGAIN) { | |
935 | /* IOPOLL retry should happen for io-wq threads */ | |
936 | if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL)) | |
937 | goto copy_iov; | |
4e17aaab SR | 938 |
939 | if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) { | |
940 | struct io_async_rw *rw; | |
941 | ||
1c849b48 SR | 942 | trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
943 | req->cqe.res, ret2); | |
944 | ||
4e17aaab SR | 945 | /* This is a partial write. The file pos has already been
946 | * updated, setup the async struct to complete the request | |
947 | * in the worker. Also update bytes_done to account for | |
948 | * the bytes already written. | |
949 | */ | |
950 | iov_iter_save_state(&s->iter, &s->iter_state); | |
951 | ret = io_setup_async_rw(req, iovec, s, true); | |
952 | ||
953 | rw = req->async_data; | |
954 | if (rw) | |
955 | rw->bytes_done += ret2; | |
956 | ||
e053aaf4 JA | 957 | if (kiocb->ki_flags & IOCB_WRITE)
958 | kiocb_end_write(req); | |
4e17aaab SR | 959 | return ret ? ret : -EAGAIN;
960 | } | |
f3b44f92 | 961 | done: |
df9830d8 | 962 | ret = kiocb_done(req, ret2, issue_flags); |
f3b44f92 JA | 963 | } else {
964 | copy_iov: | |
965 | iov_iter_restore(&s->iter, &s->iter_state); | |
966 | ret = io_setup_async_rw(req, iovec, s, false); | |
e053aaf4 JA | 967 | if (!ret) {
968 | if (kiocb->ki_flags & IOCB_WRITE) | |
969 | kiocb_end_write(req); | |
970 | return -EAGAIN; | |
971 | } | |
972 | return ret; | |
f3b44f92 | 973 | } |
f3b44f92 JA | 974 | /* it's reportedly faster than delegating the null check to kfree() */
975 | if (iovec) | |
976 | kfree(iovec); | |
977 | return ret; | |
978 | } | |
979 | ||
980 | static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx) | |
981 | { | |
46929b08 | 982 | io_commit_cqring_flush(ctx); |
f3b44f92 JA | 983 | if (ctx->flags & IORING_SETUP_SQPOLL)
984 | io_cqring_wake(ctx); | |
985 | } | |
986 | ||
987 | int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) | |
988 | { | |
989 | struct io_wq_work_node *pos, *start, *prev; | |
990 | unsigned int poll_flags = BLK_POLL_NOSLEEP; | |
991 | DEFINE_IO_COMP_BATCH(iob); | |
992 | int nr_events = 0; | |
993 | ||
994 | /* | |
995 | * Only spin for completions if we don't have multiple devices hanging | |
996 | * off our complete list. | |
997 | */ | |
998 | if (ctx->poll_multi_queue || force_nonspin) | |
999 | poll_flags |= BLK_POLL_ONESHOT; | |
1000 | ||
1001 | wq_list_for_each(pos, start, &ctx->iopoll_list) { | |
1002 | struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); | |
f2ccb5ae | 1003 | struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); |
f3b44f92 JA | 1004 | int ret;
1005 | ||
1006 | /* | |
1007 | * Move completed and retryable entries to our local lists. | |
1008 | * If we find a request that requires polling, break out | |
1009 | * and complete those lists first, if we have entries there. | |
1010 | */ | |
1011 | if (READ_ONCE(req->iopoll_completed)) | |
1012 | break; | |
1013 | ||
1014 | ret = rw->kiocb.ki_filp->f_op->iopoll(&rw->kiocb, &iob, poll_flags); | |
1015 | if (unlikely(ret < 0)) | |
1016 | return ret; | |
1017 | else if (ret) | |
1018 | poll_flags |= BLK_POLL_ONESHOT; | |
1019 | ||
1020 | /* iopoll may have completed current req */ | |
1021 | if (!rq_list_empty(iob.req_list) || | |
1022 | READ_ONCE(req->iopoll_completed)) | |
1023 | break; | |
1024 | } | |
1025 | ||
1026 | if (!rq_list_empty(iob.req_list)) | |
1027 | iob.complete(&iob); | |
1028 | else if (!pos) | |
1029 | return 0; | |
1030 | ||
1031 | prev = start; | |
1032 | wq_list_for_each_resume(pos, prev) { | |
1033 | struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); | |
1034 | ||
1035 | /* order with io_complete_rw_iopoll(), e.g. ->result updates */ | |
1036 | if (!smp_load_acquire(&req->iopoll_completed)) | |
1037 | break; | |
1038 | nr_events++; | |
1039 | if (unlikely(req->flags & REQ_F_CQE_SKIP)) | |
1040 | continue; | |
1041 | ||
1042 | req->cqe.flags = io_put_kbuf(req, 0); | |
1043 | __io_fill_cqe_req(req->ctx, req); | |
1044 | } | |
1045 | ||
1046 | if (unlikely(!nr_events)) | |
1047 | return 0; | |
1048 | ||
1049 | io_commit_cqring(ctx); | |
1050 | io_cqring_ev_posted_iopoll(ctx); | |
1051 | pos = start ? start->next : ctx->iopoll_list.first; | |
1052 | wq_list_cut(&ctx->iopoll_list, prev, start); | |
1053 | io_free_batch_list(ctx, pos); | |
1054 | return nr_events; | |
1055 | } |