// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

#define IOEND_BATCH_SIZE	4096

/*
 * Structure allocated for each folio when block size < folio size
 * to track sub-folio uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};

static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
	if (folio_test_private(folio))
		return folio_get_private(folio);
	return NULL;
}

static struct bio_set iomap_ioend_bioset;

static struct iomap_page *
iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags)
{
	struct iomap_page *iop = to_iomap_page(folio);
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
	gfp_t gfp;

	if (iop || nr_blocks <= 1)
		return iop;

	if (flags & IOMAP_NOWAIT)
		gfp = GFP_NOWAIT;
	else
		gfp = GFP_NOFS | __GFP_NOFAIL;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
		      gfp);
	if (iop) {
		spin_lock_init(&iop->uptodate_lock);
		if (folio_test_uptodate(folio))
			bitmap_fill(iop->uptodate, nr_blocks);
		folio_attach_private(folio, iop);
	}
	return iop;
}
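
/*
 * Illustrative numbers, not taken from this file: with 4096-byte blocks
 * and a 64KiB folio, i_blocks_per_folio() is 16, so iomap_page_create()
 * above allocates one iomap_page whose uptodate[] bitmap tracks 16
 * blocks; when the block size equals the folio size, no iomap_page is
 * attached and to_iomap_page() stays NULL.
 */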

static void iomap_page_release(struct folio *folio)
{
	struct iomap_page *iop = folio_detach_private(folio);
	struct inode *inode = folio->mapping->host;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			folio_test_uptodate(folio));
	kfree(iop);
}

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_page *iop = to_iomap_page(folio);
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void iomap_iop_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned long flags;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void iomap_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	if (iop)
		iomap_iop_set_range_uptodate(folio, iop, off, len);
	else
		folio_mark_uptodate(folio);
}

static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(folio, iop, offset, len);
	}

	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
		folio_unlock(folio);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	struct iomap_page *iop;
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	size_t offset = offset_in_folio(folio, iomap->offset);
	void *addr;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			 offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		iop = iomap_page_create(iter->inode, folio, iter->flags);
	else
		iop = to_iomap_page(folio);

	addr = kmap_local_folio(folio, offset);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
	return 0;
}

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_page *iop;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	iop = iomap_page_create(iter->inode, folio, iter->flags);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, iop, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (iop)
		atomic_add(plen, &iop->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
				     REQ_OP_READ, gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!ctx->bio) {
			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
					     orig_gfp);
		}
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		folio_set_error(folio);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_folio, we always
	 * return 0 and just set the folio error flag on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);

static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);
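
/*
 * Illustrative sketch, not part of this file: a filesystem typically wires
 * the read-side helpers above into its address_space_operations roughly as
 * below.  "example_iomap_ops" is a hypothetical, filesystem-provided
 * struct iomap_ops; everything else is the real exported API.
 */
#if 0
static int example_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &example_iomap_ops);
}

static void example_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &example_iomap_ops);
}

static const struct address_space_operations example_aops = {
	.read_folio		= example_read_folio,
	.readahead		= example_readahead,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
};
#endif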

/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!iop)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!test_bit(i, iop->uptodate))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

/**
 * iomap_get_folio - get a folio reference for writing
 * @iter: iteration structure
 * @pos: start offset of write
 *
 * Returns a locked reference to the folio at @pos, or an error pointer if the
 * folio could not be obtained.
 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos)
{
	unsigned fgp = FGP_WRITEBEGIN | FGP_NOFS;

	if (iter->flags & IOMAP_NOWAIT)
		fgp |= FGP_NOWAIT;

	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
}
EXPORT_SYMBOL_GPL(iomap_get_folio);
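
/*
 * Illustrative sketch, not part of this file: a filesystem that needs its
 * own preparation around folio lookup can supply an
 * iomap_folio_ops->get_folio hook and still defer the actual page cache
 * lookup to iomap_get_folio().  All "example_*" names are hypothetical.
 */
#if 0
static struct folio *example_get_folio(struct iomap_iter *iter, loff_t pos,
		unsigned len)
{
	/* filesystem-private preparation (locking, reservation) goes here */
	return iomap_get_folio(iter, pos);
}
#endif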

bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list();
	 * skip those here.
	 */
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return false;
	iomap_page_release(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
					folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		iomap_page_release(folio);
	} else if (folio_test_large(folio)) {
		/* Must release the iop so the page can be split */
		WARN_ON_ONCE(!folio_test_uptodate(folio) &&
			     folio_test_dirty(folio));
		iomap_page_release(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
					 pos + len - 1);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_add_folio(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}

static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_page *iop;
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	if (folio_test_uptodate(folio))
		return 0;
	folio_clear_error(folio);

	iop = iomap_page_create(iter->inode, folio, iter->flags);
	if ((iter->flags & IOMAP_NOWAIT) && !iop && nr_blocks > 1)
		return -EAGAIN;

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status;

			if (iter->flags & IOMAP_NOWAIT)
				return -EAGAIN;

			status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, iop, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
		size_t len)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->get_folio)
		return folio_ops->get_folio(iter, pos, len);
	else
		return iomap_get_folio(iter, pos);
}

static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
		struct folio *folio)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->put_folio) {
		folio_ops->put_folio(iter->inode, pos, ret, folio);
	} else {
		folio_unlock(folio);
		folio_put(folio);
	}
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	folio = __iomap_get_folio(iter, pos, len);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/*
	 * Now we have a locked folio, before we do anything with it we need to
	 * check that the iomap we have cached is not stale. The inode extent
	 * mapping can change due to concurrent IO in flight (e.g.
	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
	 * reclaimed a previously partially written page at this index after IO
	 * completion before this write reaches this file offset) and hence we
	 * could do the wrong thing here (zero a page range incorrectly or fail
	 * to zero) and corrupt data.
	 */
	if (folio_ops && folio_ops->iomap_valid) {
		bool iomap_valid = folio_ops->iomap_valid(iter->inode,
							  &iter->iomap);
		if (!iomap_valid) {
			iter->iomap.flags |= IOMAP_F_STALE;
			status = 0;
			goto out_unlock;
		}
	}

	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	__iomap_put_folio(iter, pos, 0, folio);
	iomap_write_failed(iter->inode, pos, len);

	return status;
}

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return 0;
	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
	filemap_dirty_folio(inode->i_mapping, folio);
	return copied;
}

static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, folio, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	__iomap_put_folio(iter, pos, ret, folio);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (ret < len)
		iomap_write_failed(iter->inode, pos + ret, len - ret);
	return ret;
}

static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;
	struct address_space *mapping = iter->inode->i_mapping;
	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;

	do {
		struct folio *folio;
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		status = balance_dirty_pages_ratelimited_flags(mapping,
							       bdp_flags);
		if (unlikely(status))
			break;

		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * For async buffered writes the assumption is that the user
		 * page has already been faulted in. This can be optimized by
		 * faulting the user page.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			break;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		page = folio_file_page(folio, pos >> PAGE_SHIFT);
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);

		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
		pos += status;
		written += status;
		length -= status;
	} while (iov_iter_count(i) && length);

	if (status == -EAGAIN) {
		iov_iter_revert(i, written);
		return -EAGAIN;
	}
	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
	};
	int ret;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iter.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);
	if (iter.pos == iocb->ki_pos)
		return ret;
	return iter.pos - iocb->ki_pos;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
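
/*
 * Illustrative sketch, not part of this file: a minimal ->write_iter built
 * on iomap_file_buffered_write().  "example_iomap_ops" is again a
 * hypothetical, filesystem-provided iomap_ops.
 */
#if 0
static ssize_t example_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &example_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif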

/*
 * Scan the data range passed to us for dirty page cache folios. If we find a
 * dirty folio, punch out the preceding range and update the offset from which
 * the next punch will start.
 *
 * We can punch out storage reservations under clean pages because they either
 * contain data that has been written back - in which case the delalloc punch
 * over that range is a no-op - or they were instantiated by read faults, in
 * which case they contain zeroes and we can remove the delalloc backing range
 * and any new writes to those pages will do the normal hole filling
 * operation...
 *
 * This makes the logic simple: we only need to keep the delalloc extents over
 * the dirty ranges of the page cache.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 */
static int iomap_write_delalloc_scan(struct inode *inode,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		int (*punch)(struct inode *inode, loff_t offset, loff_t length))
{
	while (start_byte < end_byte) {
		struct folio	*folio;

		/* grab locked page */
		folio = filemap_lock_folio(inode->i_mapping,
				start_byte >> PAGE_SHIFT);
		if (IS_ERR(folio)) {
			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
					PAGE_SIZE;
			continue;
		}

		/* if dirty, punch up to offset */
		if (folio_test_dirty(folio)) {
			if (start_byte > *punch_start_byte) {
				int	error;

				error = punch(inode, *punch_start_byte,
						start_byte - *punch_start_byte);
				if (error) {
					folio_unlock(folio);
					folio_put(folio);
					return error;
				}
			}

			/*
			 * Make sure the next punch start is correctly bound to
			 * the end of this data range, not the end of the folio.
			 */
			*punch_start_byte = min_t(loff_t, end_byte,
					folio_next_index(folio) << PAGE_SHIFT);
		}

		/* move offset to start of next folio in range */
		start_byte = folio_next_index(folio) << PAGE_SHIFT;
		folio_unlock(folio);
		folio_put(folio);
	}
	return 0;
}

/*
 * Punch out all the delalloc blocks in the range given except for those that
 * have dirty data still pending in the page cache - those are going to be
 * written and so must still retain the delalloc backing for writeback.
 *
 * As we are scanning the page cache for data, we don't need to reimplement the
 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
 * start and end of data ranges correctly even for sub-folio block sizes. This
 * byte range based iteration is especially convenient because it means we
 * don't have to care about variable size folios, nor where the start or end of
 * the data range lies within a folio, if they lie within the same folio or even
 * if there are multiple discontiguous data ranges within the folio.
 *
 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
 * date. A write page fault can then mark it dirty. If we then fail a write()
 * beyond EOF into that up to date cached range, we allocate a delalloc block
 * beyond EOF and then have to punch it out. Because the range is up to date,
 * mapping_seek_hole_data() will return it, and we will skip the punch because
 * the folio is dirty. This is incorrect - we always need to punch out delalloc
 * beyond EOF in this case as writeback will never write back and convert that
 * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
 * resulting in always punching out the range from the EOF to the end of the
 * range the iomap spans.
 *
 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
 * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
 * returns the end of the data range (data_end). Using closed intervals would
 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
 * the code to subtle off-by-one bugs....
 */
static int iomap_write_delalloc_release(struct inode *inode,
		loff_t start_byte, loff_t end_byte,
		int (*punch)(struct inode *inode, loff_t pos, loff_t length))
{
	loff_t punch_start_byte = start_byte;
	loff_t scan_end_byte = min(i_size_read(inode), end_byte);
	int error = 0;

	/*
	 * Lock the mapping to avoid races with page faults re-instantiating
	 * folios and dirtying them via ->page_mkwrite whilst we walk the
	 * cache and perform delalloc extent removal. Failing to do this can
	 * leave dirty pages with no space reservation in the cache.
	 */
	filemap_invalidate_lock(inode->i_mapping);
	while (start_byte < scan_end_byte) {
		loff_t		data_end;

		start_byte = mapping_seek_hole_data(inode->i_mapping,
				start_byte, scan_end_byte, SEEK_DATA);
		/*
		 * If there is no more data to scan, all that is left is to
		 * punch out the remaining range.
		 */
		if (start_byte == -ENXIO || start_byte == scan_end_byte)
			break;
		if (start_byte < 0) {
			error = start_byte;
			goto out_unlock;
		}
		WARN_ON_ONCE(start_byte < punch_start_byte);
		WARN_ON_ONCE(start_byte > scan_end_byte);

		/*
		 * We find the end of this contiguous cached data range by
		 * seeking from start_byte to the beginning of the next hole.
		 */
		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
				scan_end_byte, SEEK_HOLE);
		if (data_end < 0) {
			error = data_end;
			goto out_unlock;
		}
		WARN_ON_ONCE(data_end <= start_byte);
		WARN_ON_ONCE(data_end > scan_end_byte);

		error = iomap_write_delalloc_scan(inode, &punch_start_byte,
				start_byte, data_end, punch);
		if (error)
			goto out_unlock;

		/* The next data search starts at the end of this one. */
		start_byte = data_end;
	}

	if (punch_start_byte < end_byte)
		error = punch(inode, punch_start_byte,
				end_byte - punch_start_byte);
out_unlock:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}

/*
 * When a short write occurs, the filesystem may need to remove reserved space
 * that was allocated in ->iomap_begin from its ->iomap_end method. For
 * filesystems that use delayed allocation, we need to punch out delalloc
 * extents from the range that are not dirty in the page cache. As the write can
 * race with page faults, there can be dirty pages over the delalloc extent
 * outside the range of a short write but still within the delalloc extent
 * allocated for this iomap.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 *
 * The punch() callback *must* only punch delalloc extents in the range passed
 * to it. It must skip over all other types of extents in the range and leave
 * them completely unchanged. It must do this punch atomically with respect to
 * other extent modifications.
 *
 * The punch() callback may be called with a folio locked to prevent writeback
 * extent allocation racing at the edge of the range we are currently punching.
 * The locked folio may or may not cover the range being punched, so it is not
 * safe for the punch() callback to lock folios itself.
 *
 * Lock order is:
 *
 * inode->i_rwsem (shared or exclusive)
 *   inode->i_mapping->invalidate_lock (exclusive)
 *     folio_lock()
 *       ->punch
 *         internal filesystem allocation lock
 */
int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
		struct iomap *iomap, loff_t pos, loff_t length,
		ssize_t written,
		int (*punch)(struct inode *inode, loff_t pos, loff_t length))
{
	loff_t			start_byte;
	loff_t			end_byte;
	int			blocksize = i_blocksize(inode);

	if (iomap->type != IOMAP_DELALLOC)
		return 0;

	/* If we didn't reserve the blocks, we're not allowed to punch them. */
	if (!(iomap->flags & IOMAP_F_NEW))
		return 0;

	/*
	 * start_byte refers to the first unused block after a short write. If
	 * nothing was written, round offset down to point at the first block in
	 * the range.
	 */
	if (unlikely(!written))
		start_byte = round_down(pos, blocksize);
	else
		start_byte = round_up(pos + written, blocksize);
	end_byte = round_up(pos + length, blocksize);

	/* Nothing to do if we've written the entire delalloc extent */
	if (start_byte >= end_byte)
		return 0;

	return iomap_write_delalloc_release(inode, start_byte, end_byte,
					punch);
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
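
/*
 * Illustrative sketch, not part of this file: a delalloc filesystem would
 * call the helper above from its ->iomap_end method after a short (or
 * failed) buffered write.  "example_punch_delalloc" is a hypothetical
 * callback that removes only delalloc extents in the byte range it is
 * given, per the contract documented above.
 */
#if 0
static int example_buffered_write_iomap_end(struct inode *inode, loff_t pos,
		loff_t length, ssize_t written, unsigned flags,
		struct iomap *iomap)
{
	return iomap_file_buffered_write_punch_delalloc(inode, iomap, pos,
			length, written, example_punch_delalloc);
}
#endif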

static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	long status = 0;
	loff_t written = 0;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;
	/* don't bother with holes or unwritten extents */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned long offset = offset_in_page(pos);
		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
		struct folio *folio;

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			return status;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		status = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(status == 0))
			return -EIO;

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);

static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (status)
			return status;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		folio_zero_range(folio, offset, bytes);
		folio_mark_accessed(folio);

		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(bytes == 0))
			return -EIO;

		pos += bytes;
		length -= bytes;
		written += bytes;
	} while (length > 0);

	if (did_zero)
		*did_zero = true;
	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
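
/*
 * Illustrative sketch, not part of this file: a truncate path typically
 * zeroes the partial tail block with iomap_truncate_page() before cutting
 * i_size, so stale data in the final block is not exposed.  All
 * "example_*" names are hypothetical.
 */
#if 0
static int example_setsize(struct inode *inode, loff_t newsize)
{
	bool did_zero = false;
	int error;

	error = iomap_truncate_page(inode, newsize, &did_zero,
			&example_iomap_ops);
	if (error)
		return error;
	truncate_setsize(inode, newsize);
	return 0;
}
#endif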

static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
		struct folio *folio)
{
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
					      &iter->iomap);
		if (ret)
			return ret;
		block_commit_write(&folio->page, 0, length);
	} else {
		WARN_ON_ONCE(!folio_test_uptodate(folio));
		folio_mark_dirty(folio);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= file_inode(vmf->vma->vm_file),
		.flags		= IOMAP_WRITE | IOMAP_FAULT,
	};
	struct folio *folio = page_folio(vmf->page);
	ssize_t ret;

	folio_lock(folio);
	ret = folio_mkwrite_check_truncate(folio, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = folio_pos(folio);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_folio_mkwrite_iter(&iter, folio);

	if (ret < 0)
		goto out_unlock;
	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
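
/*
 * Illustrative sketch, not part of this file: ->page_mkwrite wiring for a
 * write fault, bracketed by the usual freeze protection.
 * "example_iomap_ops" is hypothetical.
 */
#if 0
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	ret = iomap_page_mkwrite(vmf, &example_iomap_ops);
	sb_end_pagefault(inode->i_sb);
	return ret;
}
#endif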

static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (error) {
		folio_set_error(folio);
		mapping_set_error(inode->i_mapping, error);
	}

	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);

	if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
		folio_end_writeback(folio);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static u32
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_inline_bio;
	struct bio *last = ioend->io_bio, *next;
	u64 start = bio->bi_iter.bi_sector;
	loff_t offset = ioend->io_offset;
	bool quiet = bio_flagged(bio, BIO_QUIET);
	u32 folio_count = 0;

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct folio_iter fi;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk all folios in bio, ending page IO on them */
		bio_for_each_folio_all(fi, bio) {
			iomap_finish_folio_write(inode, fi.folio, fi.length,
					error);
			folio_count++;
		}
		bio_put(bio);
	}
	/* The ioend has been freed by bio_put() */

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
"%s: writeback error on inode %lu, offset %lld, sector %llu",
			inode->i_sb->s_id, inode->i_ino, offset, start);
	}
	return folio_count;
}

/*
 * Ioend completion routine for merged bios. This can only be called from task
 * contexts as merged ioends can be of unbound length. Hence we have to break up
 * the writeback completions into manageable chunks to avoid long scheduler
 * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
 * good batch processing throughput without creating adverse scheduler latency
 * conditions.
 */
void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
	struct list_head tmp;
	u32 completions;

	might_sleep();

	list_replace_init(&ioend->io_list, &tmp);
	completions = iomap_finish_ioend(ioend, error);

	while (!list_empty(&tmp)) {
		if (completions > IOEND_BATCH_SIZE * 8) {
			cond_resched();
			completions = 0;
		}
		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
		list_del_init(&ioend->io_list);
		completions += iomap_finish_ioend(ioend, error);
	}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);

/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
{
	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
		return false;
	if ((ioend->io_flags & IOMAP_F_SHARED) ^
	    (next->io_flags & IOMAP_F_SHARED))
		return false;
	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
	    (next->io_type == IOMAP_UNWRITTEN))
		return false;
	if (ioend->io_offset + ioend->io_size != next->io_offset)
		return false;
	/*
	 * Do not merge physically discontiguous ioends. The filesystem
	 * completion functions will have to iterate the physical
	 * discontiguities even if we merge the ioends at a logical level, so
	 * we don't gain anything by merging physical discontiguities here.
	 *
	 * We cannot use bio->bi_iter.bi_sector here as it is modified during
	 * submission so does not point to the start sector of the bio at
	 * completion.
	 */
	if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
		return false;
	return true;
}

void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
{
	struct iomap_ioend *next;

	INIT_LIST_HEAD(&ioend->io_list);

	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
			io_list))) {
		if (!iomap_ioend_can_merge(ioend, next))
			break;
		list_move_tail(&next->io_list, &ioend->io_list);
		ioend->io_size += next->io_size;
	}
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);

static int
iomap_ioend_compare(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
	struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);

	if (ia->io_offset < ib->io_offset)
		return -1;
	if (ia->io_offset > ib->io_offset)
		return 1;
	return 0;
}

void
iomap_sort_ioends(struct list_head *ioend_list)
{
	list_sort(NULL, ioend_list, iomap_ioend_compare);
}
EXPORT_SYMBOL_GPL(iomap_sort_ioends);
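
/*
 * Illustrative sketch, not part of this file: filesystem completion code
 * typically sorts a list of finished ioends and then lets
 * iomap_ioend_try_merge() coalesce adjacent ones before finishing them,
 * so each merged chain is completed with a single call.
 */
#if 0
static void example_finish_ioend_list(struct list_head *ioend_list, int error)
{
	struct iomap_ioend *ioend;

	iomap_sort_ioends(ioend_list);
	while ((ioend = list_first_entry_or_null(ioend_list,
			struct iomap_ioend, io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, ioend_list);
		iomap_finish_ioends(ioend, error);
	}
}
#endif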
1452
1453static void iomap_writepage_end_bio(struct bio *bio)
1454{
1455 struct iomap_ioend *ioend = bio->bi_private;
1456
1457 iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
1458}
1459
1460/*
1461 * Submit the final bio for an ioend.
1462 *
1463 * If @error is non-zero, it means that we have a situation where some part of
f1f264b4 1464 * the submission process has failed after we've marked pages for writeback
598ecfba
CH
1465 * and unlocked them. In this situation, we need to fail the bio instead of
1466 * submitting it. This typically only happens on a filesystem shutdown.
1467 */
1468static int
1469iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
1470 int error)
1471{
1472 ioend->io_bio->bi_private = ioend;
1473 ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
1474
1475 if (wpc->ops->prepare_ioend)
1476 error = wpc->ops->prepare_ioend(ioend, error);
1477 if (error) {
1478 /*
f1f264b4 1479 * If we're failing the IO now, just mark the ioend with an
598ecfba
CH
1480 * error and finish it. This will run IO completion immediately
1481 * as there is only one reference to the ioend at this point in
1482 * time.
1483 */
1484 ioend->io_bio->bi_status = errno_to_blk_status(error);
1485 bio_endio(ioend->io_bio);
1486 return error;
1487 }
1488
1489 submit_bio(ioend->io_bio);
1490 return 0;
1491}
1492
1493static struct iomap_ioend *
1494iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
1495 loff_t offset, sector_t sector, struct writeback_control *wbc)
1496{
1497 struct iomap_ioend *ioend;
1498 struct bio *bio;
1499
609be106
CH
1500 bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
1501 REQ_OP_WRITE | wbc_to_write_flags(wbc),
1502 GFP_NOFS, &iomap_ioend_bioset);
598ecfba 1503 bio->bi_iter.bi_sector = sector;
598ecfba
CH
1504 wbc_init_bio(wbc, bio);
1505
1506 ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
1507 INIT_LIST_HEAD(&ioend->io_list);
1508 ioend->io_type = wpc->iomap.type;
1509 ioend->io_flags = wpc->iomap.flags;
1510 ioend->io_inode = inode;
1511 ioend->io_size = 0;
ebb7fb15 1512 ioend->io_folios = 0;
598ecfba 1513 ioend->io_offset = offset;
598ecfba 1514 ioend->io_bio = bio;
ebb7fb15 1515 ioend->io_sector = sector;
598ecfba
CH
1516 return ioend;
1517}
1518
1519/*
1520 * Allocate a new bio, and chain the old bio to the new one.
1521 *
f1f264b4 1522 * Note that we have to perform the chaining in this unintuitive order
598ecfba
CH
1523 * so that the bi_private linkage is set up in the right direction for the
1524 * traversal in iomap_finish_ioend().
1525 */
1526static struct bio *
1527iomap_chain_bio(struct bio *prev)
1528{
1529 struct bio *new;
1530
07888c66
CH
1531 new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
1532 bio_clone_blkg_association(new, prev);
598ecfba 1533 new->bi_iter.bi_sector = bio_end_sector(prev);
598ecfba
CH
1534
1535 bio_chain(prev, new);
1536 bio_get(prev); /* for iomap_finish_ioend */
1537 submit_bio(prev);
1538 return new;
1539}
1540
1541static bool
1542iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
1543 sector_t sector)
1544{
1545 if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
1546 (wpc->ioend->io_flags & IOMAP_F_SHARED))
1547 return false;
1548 if (wpc->iomap.type != wpc->ioend->io_type)
1549 return false;
1550 if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
1551 return false;
1552 if (sector != bio_end_sector(wpc->ioend->io_bio))
1553 return false;
ebb7fb15
DC
1554 /*
1555 * Limit ioend bio chain lengths to minimise IO completion latency. This
1556 * also prevents long tight loops ending page writeback on all the
1557 * folios in the ioend.
1558 */
1559 if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
1560 return false;
598ecfba
CH
1561 return true;
1562}
1563
/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 */
static void
iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
		struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct list_head *iolist)
{
	sector_t sector = iomap_sector(&wpc->iomap, pos);
	unsigned len = i_blocksize(inode);
	size_t poff = offset_in_folio(folio, pos);

	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
	}

	if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
		wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
		bio_add_folio(wpc->ioend->io_bio, folio, len, poff);
	}

	if (iop)
		atomic_add(len, &iop->write_bytes_pending);
	wpc->ioend->io_size += len;
	wbc_account_cgroup_owner(wbc, &folio->page, len);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * the forward progress guarantees we need to provide. The current ioend we're
 * adding blocks to is cached in the writepage context, and if the new block
 * doesn't append to the cached ioend, it will create a new ioend and cache
 * that instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has
 * been detected. While ioends are submitted immediately once they have been
 * fully built, batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
static int
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct inode *inode,
		struct folio *folio, u64 end_pos)
{
	struct iomap_page *iop = iomap_page_create(inode, folio, 0);
	struct iomap_ioend *ioend, *next;
	unsigned len = i_blocksize(inode);
	unsigned nblocks = i_blocks_per_folio(inode, folio);
	u64 pos = folio_pos(folio);
	int error = 0, count = 0, i;
	LIST_HEAD(submit_list);

	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);

	/*
	 * Walk through the folio to find areas to write back. If we
	 * run off the end of the current map or find the current map
	 * invalid, grab a new one.
	 */
	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = wpc->ops->map_blocks(wpc, inode, pos);
		if (error)
			break;
		trace_iomap_writepage_map(inode, &wpc->iomap);
		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
			continue;
		if (wpc->iomap.type == IOMAP_HOLE)
			continue;
		iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
				&submit_list);
		count++;
	}
	if (count)
		wpc->ioend->io_folios++;

	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
	WARN_ON_ONCE(!folio_test_locked(folio));
	WARN_ON_ONCE(folio_test_writeback(folio));
	WARN_ON_ONCE(folio_test_dirty(folio));

	/*
	 * We cannot cancel the ioend directly here on error.  We may have
	 * already set other pages under writeback and hence we have to run I/O
	 * completion to mark the error state of the pages under writeback
	 * appropriately.
	 */
	if (unlikely(error)) {
		/*
		 * Let the filesystem know what portion of the current page
		 * failed to map. If the page hasn't been added to an ioend,
		 * it won't be affected by I/O completion and we must unlock
		 * it now.
		 */
		if (wpc->ops->discard_folio)
			wpc->ops->discard_folio(folio, pos);
		if (!count) {
			folio_unlock(folio);
			goto done;
		}
	}

	folio_start_writeback(folio);
	folio_unlock(folio);

	/*
	 * Preserve the original error if there was one; catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = iomap_submit_ioend(wpc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we race
	 * with a partial page truncate on a sub-page block sized filesystem.
	 */
	if (!count)
		folio_end_writeback(folio);
done:
	mapping_set_error(inode->i_mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page, we need to allocate space and flush it.
 * For unwritten space on the page, we need to start the conversion to
 * regular allocated space.
 */
static int iomap_do_writepage(struct folio *folio,
		struct writeback_control *wbc, void *data)
{
	struct iomap_writepage_ctx *wpc = data;
	struct inode *inode = folio->mapping->host;
	u64 end_pos, isize;

	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));

	/*
	 * Refuse to write the folio out if we're called from reclaim context.
	 *
	 * This avoids stack overflows when called with an already-deep stack
	 * from random callers doing direct reclaim or memcg reclaim.  We
	 * explicitly allow reclaim from kswapd as the stack usage there is
	 * relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Is this folio beyond the end of the file?
	 *
	 * The folio index is less than the end_index; adjust the end_pos
	 * to the highest offset that this folio should represent.
	 * -----------------------------------------------------
	 * |            file mapping               | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	isize = i_size_read(inode);
	end_pos = folio_pos(folio) + folio_size(folio);
	if (end_pos > isize) {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size.
		 * -------------------------------------------------------
		 * |            file mapping                  | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |                                | Straddles |
		 * ---------------------------------^-----------|--------|
		 */
		size_t poff = offset_in_folio(folio, isize);
		pgoff_t end_index = isize >> PAGE_SHIFT;

		/*
		 * Skip the page if it's fully outside i_size, e.g.
		 * due to a truncate operation that's in progress.  We've
		 * cleaned this page and truncate will finish things off for
		 * us.
		 *
		 * Note that the end_index is unsigned long.  If the given
		 * offset is greater than 16TB on a 32-bit system then if we
		 * checked if the page is fully outside i_size with
		 * "if (page->index >= end_index + 1)", "end_index + 1" would
		 * overflow and evaluate to 0 (with 4kB pages, end_index for
		 * an i_size just below 16TB is ULONG_MAX, so end_index + 1
		 * wraps to 0).  Hence this page would be redirtied and
		 * written out repeatedly, which would result in an infinite
		 * loop; the user program performing this operation would
		 * hang.  Instead, we can detect this situation by checking
		 * if the page is totally beyond i_size or if its offset is
		 * just equal to the EOF.
		 */
		if (folio->index > end_index ||
		    (folio->index == end_index && poff == 0))
			goto unlock;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		folio_zero_segment(folio, poff, folio_size(folio));
		end_pos = isize;
	}

	return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);

redirty:
	folio_redirty_for_writepage(wbc, folio);
unlock:
	folio_unlock(folio);
	return 0;
}

int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepages);
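
/*
 * Example (an illustrative sketch, not part of this file): a filesystem
 * typically drives the writeback path above from its ->writepages method
 * by passing a struct iomap_writepage_ctx and a struct iomap_writeback_ops.
 * All "myfs" names below are hypothetical:
 *
 *	static const struct iomap_writeback_ops myfs_writeback_ops = {
 *		.map_blocks	= myfs_map_blocks,
 *	};
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc,
 *				&myfs_writeback_ops);
 *	}
 *
 * where myfs_map_blocks() fills wpc->iomap for the block at the passed
 * offset, returning 0 early to reuse the cached mapping while it still
 * covers the offset (myfs_lookup_extent() is a hypothetical extent lookup):
 *
 *	static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
 *			struct inode *inode, loff_t offset)
 *	{
 *		if (offset >= wpc->iomap.offset &&
 *		    offset < wpc->iomap.offset + wpc->iomap.length)
 *			return 0;
 *		return myfs_lookup_extent(inode, offset, &wpc->iomap);
 *	}
 *
 * ->prepare_ioend and ->discard_folio are optional hooks; ->map_blocks is
 * called unconditionally and therefore required.
 */
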
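/*
 * Each ioend is allocated with its bio embedded at io_inline_bio: the
 * front_pad below makes every bio allocated from iomap_ioend_bioset carry
 * a struct iomap_ioend in front of it, which iomap_alloc_ioend() recovers
 * with container_of().
 */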
static int __init iomap_init(void)
{
	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
			   offsetof(struct iomap_ioend, io_inline_bio),
			   BIOSET_NEED_BVECS);
}
fs_initcall(iomap_init);