// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "trace.h"

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	size_t			done_before;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct bio		*poll_bio;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
{
	if (dio->dops && dio->dops->bio_set)
		return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,
					GFP_KERNEL, dio->dops->bio_set);
	return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);
}

static void iomap_dio_submit_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, struct bio *bio, loff_t pos)
{
	atomic_inc(&dio->ref);

	/* Sync dio can't be polled reliably */
	if ((dio->iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(dio->iocb)) {
		bio_set_polled(bio, dio->iocb);
		dio->submit.poll_bio = bio;
	}

	if (dio->dops && dio->dops->submit_io)
		dio->dops->submit_io(iter, bio, pos);
	else
		submit_bio(bio);
}

ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error && dio->size &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	if (ret > 0)
		ret += dio->done_before;

	trace_iomap_dio_complete(iocb, dio->error, ret);
	kfree(dio);

	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio));
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			WRITE_ONCE(dio->iocb->private, NULL);
			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			WRITE_ONCE(dio->iocb->private, NULL);
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);

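/*
 * Illustrative sketch, not part of this file: a filesystem that wants to
 * hook bio allocation and completion does so through struct iomap_dio_ops.
 * The example_* names below are hypothetical; the firm points are that
 * ->bio_set feeds iomap_dio_alloc_bio() above, and that a private
 * completion handler must eventually call iomap_dio_bio_end_io() so the
 * dio reference counting still happens:
 *
 *	static void example_end_io(struct bio *bio)
 *	{
 *		// Filesystem-private completion work goes here.  Then hand
 *		// the bio back to iomap; bio->bi_private still points to
 *		// the iomap_dio set up during submission.
 *		iomap_dio_bio_end_io(bio);
 *	}
 *
 *	static void example_submit_io(const struct iomap_iter *iter,
 *			struct bio *bio, loff_t file_offset)
 *	{
 *		bio->bi_end_io = example_end_io;
 *		submit_bio(bio);
 *	}
 *
 *	static const struct iomap_dio_ops example_dio_ops = {
 *		.submit_io	= example_submit_io,
 *		.bio_set	= &example_bio_set,
 *	};
 */
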
static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
		loff_t pos, unsigned len)
{
	struct inode *inode = file_inode(dio->iocb->ki_filp);
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
				  GFP_KERNEL);
	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	iomap_dio_submit_bio(iter, dio, bio, pos);
}

/*
 * Figure out the bio's operation flags from the dio request, the
 * mapping, and whether or not we want FUA.  Note that we can end up
 * clearing the WRITE_FUA flag in the dio request.
 */
static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
		const struct iomap *iomap, bool use_fua)
{
	blk_opf_t opflags = REQ_SYNC | REQ_IDLE;

	if (!(dio->flags & IOMAP_DIO_WRITE))
		return REQ_OP_READ;

	opflags |= REQ_OP_WRITE;
	if (use_fua)
		opflags |= REQ_FUA;
	else
		dio->flags &= ~IOMAP_DIO_WRITE_FUA;

	return opflags;
}

static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iter->iomap;
	struct inode *inode = iter->inode;
	unsigned int fs_block_size = i_blocksize(inode), pad;
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	blk_opf_t bio_opf;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;
	size_t orig_count;

	if ((pos | length) & (bdev_logical_block_size(iomap->bdev) - 1) ||
	    !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device supports FUA.  This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) && bdev_fua(iomap->bdev))
			use_fua = true;
	}

	/*
	 * Save the original count and trim the iter to just the extent we
	 * are operating on right now.  The iter will be re-expanded once
	 * we are done.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	if (!iov_iter_count(dio->submit.iter))
		goto out;

	/*
	 * We can only poll for single bio I/Os.
	 */
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
		dio->iocb->ki_flags &= ~IOCB_HIPRI;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos - pad, pad);
	}

	/*
	 * Set the operation flags early so that bio_iov_iter_get_pages
	 * can set up the page vector appropriately for a ZONE_APPEND
	 * operation.
	 */
	bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);

	nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			copied = ret = 0;
			goto out;
		}

		bio = iomap_dio_alloc_bio(iter, dio, nr_pages, bio_opf);
		fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
					  GFP_KERNEL);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			task_io_account_write(n);
		} else {
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += n;
		copied += n;

		nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
						 BIO_MAX_VECS);
		/*
		 * We can only poll for single bio I/Os.
		 */
		if (nr_pages)
			dio->iocb->ki_flags &= ~IOCB_HIPRI;
		iomap_dio_submit_bio(iter, dio, bio, pos);
		pos += n;
	} while (nr_pages);

	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF. If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
	}
out:
	/* Undo iter limitation to current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return copied;
	return ret;
}

static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);

	dio->size += length;
	if (!length)
		return -EFAULT;
	return length;
}

static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iomi->iomap;
	struct iov_iter *iter = dio->submit.iter;
	void *inline_data = iomap_inline_data(iomap, iomi->pos);
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	size_t copied;

	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
		return -EIO;

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = iomi->inode->i_size;

		if (pos > size)
			memset(iomap_inline_data(iomap, size), 0, pos - size);
		copied = copy_from_iter(inline_data, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(iomi->inode, pos + copied);
			mark_inode_dirty(iomi->inode);
		}
	} else {
		copied = copy_to_iter(inline_data, length, iter);
	}
	dio->size += copied;
	if (!copied)
		return -EFAULT;
	return copied;
}

static loff_t iomap_dio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	switch (iter->iomap.type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_iter(iter, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_iter(iter, dio);
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_MAPPED:
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_INLINE:
		return iomap_dio_inline_iter(iter, dio);
	case IOMAP_DELALLOC:
		/*
		 * DIO is not serialised against mmap() access at all, and so
		 * if the page_mkwrite occurs between the writeback and the
		 * iomap_iter() call in the DIO path, then it will see the
		 * DELALLOC block that the page-mkwrite allocated.
		 */
		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
				    dio->iocb->ki_filp, current->comm);
		return -EIO;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
 * is being issued as AIO or not.  This allows us to optimise pure data writes
 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write.  This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes.  In that case, we still need to do a full data sync
 * completion.
 *
 * When page faults are disabled and @dio_flags includes IOMAP_DIO_PARTIAL,
 * __iomap_dio_rw can return a partial result if it encounters a non-resident
 * page in @iter after preparing a transfer.  In that case, the non-resident
 * pages can be faulted in and the request resumed with @done_before set to the
 * number of bytes previously transferred.  The request will then complete with
 * the correct total number of bytes transferred; this is essential for
 * completing partial requests asynchronously.  (A sketch of such a retry loop
 * follows __iomap_dio_rw() below.)
 *
 * Returns -ENOTBLK in case of a page invalidation failure for writes.  The
 * caller needs to fall back to buffered I/O in this case.
 */
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct iomap_iter iomi = {
		.inode		= inode,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DIRECT,
		.private	= private,
	};
	loff_t end = iomi.pos + iomi.len - 1, ret = 0;
	bool wait_for_completion =
		is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
	struct blk_plug plug;
	struct iomap_dio *dio;

	trace_iomap_dio_rw_begin(iocb, iter, dio_flags, done_before);

	if (!iomi.len)
		return NULL;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return ERR_PTR(-ENOMEM);

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;
	dio->done_before = done_before;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.poll_bio = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (iomi.pos >= dio->i_size)
			goto out_free_dio;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_needs_writeback(mapping, iomi.pos,
					end)) {
				ret = -EAGAIN;
				goto out_free_dio;
			}
			iomi.flags |= IOMAP_NOWAIT;
		}

		if (user_backed_iter(iter))
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		iomi.flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_has_page(mapping, iomi.pos, end)) {
				ret = -EAGAIN;
				goto out_free_dio;
			}
			iomi.flags |= IOMAP_NOWAIT;
		}

		/* for data sync or sync, we need sync completion processing */
		if (iocb_is_dsync(iocb)) {
			dio->flags |= IOMAP_DIO_NEED_SYNC;

			/*
			 * For datasync only writes, we optimistically try
			 * using FUA for this IO.  Any non-FUA write that
			 * occurs will clear this flag, hence we know before
			 * completion whether a cache flush is necessary.
			 */
			if (!(iocb->ki_flags & IOCB_SYNC))
				dio->flags |= IOMAP_DIO_WRITE_FUA;
		}
	}

	if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
		ret = -EAGAIN;
		if (iomi.pos >= dio->i_size ||
		    iomi.pos + iomi.len > dio->i_size)
			goto out_free_dio;
		iomi.flags |= IOMAP_OVERWRITE_ONLY;
	}

	ret = filemap_write_and_wait_range(mapping, iomi.pos, end);
	if (ret)
		goto out_free_dio;

	if (iov_iter_rw(iter) == WRITE) {
		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		if (invalidate_inode_pages2_range(mapping,
				iomi.pos >> PAGE_SHIFT, end >> PAGE_SHIFT)) {
			trace_iomap_dio_invalidate_fail(inode, iomi.pos,
							iomi.len);
			ret = -ENOTBLK;
			goto out_free_dio;
		}

		if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
			ret = sb_init_dio_done_wq(inode->i_sb);
			if (ret < 0)
				goto out_free_dio;
		}
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	while ((ret = iomap_iter(&iomi, ops)) > 0) {
		iomi.processed = iomap_dio_iter(&iomi, dio);

		/*
		 * We can only poll for single bio I/Os.
		 */
		iocb->ki_flags &= ~IOCB_HIPRI;
	}

	blk_finish_plug(&plug);

	/*
	 * We only report that we've read data up to i_size.
	 * Revert iter to a state corresponding to that as some callers (such
	 * as the splice code) rely on it.
	 */
	if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
		iov_iter_revert(iter, iomi.pos - dio->i_size);

	if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) {
		if (!(iocb->ki_flags & IOCB_NOWAIT))
			wait_for_completion = true;
		ret = 0;
	}

	/* magic error code to fall back to buffered I/O */
	if (ret == -ENOTBLK) {
		wait_for_completion = true;
		ret = 0;
	}
	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion. Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->private, dio->submit.poll_bio);

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion) {
			trace_iomap_dio_rw_queued(inode, iomi.pos, iomi.len);
			return ERR_PTR(-EIOCBQUEUED);
		}

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return dio;

out_free_dio:
	kfree(dio);
	if (ret)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(__iomap_dio_rw);

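/*
 * Illustrative sketch of the IOMAP_DIO_PARTIAL retry loop described in the
 * comment above __iomap_dio_rw(), loosely modelled on callers such as gfs2.
 * example_iomap_ops is hypothetical, and a real caller also has to handle
 * locking and IOCB_NOWAIT; this only shows the done_before bookkeeping for
 * a read into the user-backed iter @to:
 *
 *	size_t done = 0;
 *	ssize_t ret;
 *
 *	for (;;) {
 *		pagefault_disable();
 *		ret = iomap_dio_rw(iocb, to, &example_iomap_ops, NULL,
 *				   IOMAP_DIO_PARTIAL, NULL, done);
 *		pagefault_enable();
 *		if (ret > 0)
 *			done = ret;	// running total, done_before included
 *		// A short positive return with bytes left in @to, or
 *		// -EFAULT with no progress, means we hit a non-resident
 *		// page: fault it in and resume where the iter stopped.
 *		if ((ret == -EFAULT || (ret > 0 && iov_iter_count(to))) &&
 *		    !fault_in_iov_iter_writeable(to, PAGE_SIZE))
 *			continue;
 *		break;
 *	}
 */
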
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct iomap_dio *dio;

	dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
			     done_before);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
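
/*
 * Illustrative sketch, not part of this file: the simplest way to drive the
 * code above is to call iomap_dio_rw() from a filesystem's
 * ->read_iter/->write_iter methods.  example_dio_write(), example_iomap_ops
 * and example_buffered_write() are hypothetical; real callers (e.g. XFS)
 * add alignment checks, shared vs. exclusive inode locking and IOCB_NOWAIT
 * handling:
 *
 *	static ssize_t example_dio_write(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = iomap_dio_rw(iocb, from, &example_iomap_ops, NULL,
 *				   0, NULL, 0);
 *		inode_unlock(inode);
 *
 *		// -ENOTBLK is the "fall back to buffered I/O" signal
 *		if (ret == -ENOTBLK)
 *			ret = example_buffered_write(iocb, from);
 *		return ret;
 *	}
 */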