// SPDX-License-Identifier: GPL-2.0
/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_read_end_io(struct bio *bio)
{
	struct folio_iter fi;
	int err = blk_status_to_errno(bio->bi_status);

	bio_for_each_folio_all(fi, bio)
		folio_end_read(fi.folio, err == 0);

	bio_put(bio);
}

static void mpage_write_end_io(struct bio *bio)
{
	struct folio_iter fi;
	int err = blk_status_to_errno(bio->bi_status);

	bio_for_each_folio_all(fi, bio) {
		if (err)
			mapping_set_error(fi.folio->mapping, err);
		folio_end_writeback(fi.folio);
	}

	bio_put(bio);
}

static struct bio *mpage_bio_submit_read(struct bio *bio)
{
	bio->bi_end_io = mpage_read_end_io;
	guard_bio_eod(bio);
	submit_bio(bio);
	return NULL;
}

static struct bio *mpage_bio_submit_write(struct bio *bio)
{
	bio->bi_end_io = mpage_write_end_io;
	guard_bio_eod(bio);
	submit_bio(bio);
	return NULL;
}

/*
 * support function for mpage_readahead.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows read_folio to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
		int page_block)
{
	struct inode *inode = folio->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	head = folio_buffers(folio);
	if (!head) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the folio and the folio just needs to be set up to date
		 */
		if (inode->i_blkbits == folio_shift(folio) &&
		    buffer_uptodate(bh)) {
			folio_mark_uptodate(folio);
			return;
		}
		head = create_empty_buffers(folio, i_blocksize(inode), 0);
	}

	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

struct mpage_readpage_args {
	struct bio *bio;
	struct folio *folio;
	unsigned int nr_pages;
	bool is_readahead;
	sector_t last_block_in_bio;
	struct buffer_head map_bh;
	unsigned long first_logical_block;
	get_block_t *get_block;
};

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible bios, submitting them for
 * IO when the blocks are not contiguous on the disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
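/*
 * For reference, the get_block_t contract the worker relies on, as declared
 * in include/linux/fs.h:
 *
 *	typedef int (get_block_t)(struct inode *inode, sector_t iblock,
 *			struct buffer_head *bh_result, int create);
 *
 * The read side calls it with create == 0.  On return, buffer_mapped()
 * state plus b_bdev, b_blocknr and b_size on *bh_result describe the extent
 * (if any) backing iblock; b_size may cover several blocks at once, which
 * is what lets first_logical_block reuse one mapping across iterations.
 */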
static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
{
	struct folio *folio = args->folio;
	struct inode *inode = folio->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *map_bh = &args->map_bh;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t first_block;
	unsigned page_block;
	unsigned first_hole = blocks_per_folio;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	blk_opf_t opf = REQ_OP_READ;
	unsigned nblocks;
	unsigned relative_block;
	gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);

	if (args->is_readahead) {
		opf |= REQ_RAHEAD;
		gfp |= __GFP_NORETRY | __GFP_NOWARN;
	}

	if (folio_buffers(folio))
		goto confused;

	block_in_file = folio_pos(folio) >> blkbits;
	last_block = block_in_file + ((args->nr_pages * PAGE_SIZE) >> blkbits);
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) &&
			block_in_file > args->first_logical_block &&
			block_in_file < (args->first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - args->first_logical_block;
		unsigned last = nblocks - map_offset;

		first_block = map_bh->b_blocknr + map_offset;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_folio)
				break;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this folio.
	 */
	map_bh->b_folio = folio;
	while (page_block < blocks_per_folio) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (args->get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			args->first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_folio)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_folio copies the data
		 * we just collected from get_block into the folio's buffers
		 * so read_folio doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_folio(folio, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_folio)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (!page_block)
			first_block = map_bh->b_blocknr;
		else if (first_block + page_block != map_bh->b_blocknr)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_folio)
				break;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_folio) {
		folio_zero_segment(folio, first_hole << blkbits, folio_size(folio));
		if (first_hole == 0) {
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			goto out;
		}
	} else if (fully_mapped) {
		folio_set_mappedtodisk(folio);
	}

	/*
	 * This folio will go to BIO.  Do we need to send this BIO off first?
	 */
	if (args->bio && (args->last_block_in_bio != first_block - 1))
		args->bio = mpage_bio_submit_read(args->bio);

alloc_new:
	if (args->bio == NULL) {
		args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), opf,
				      gfp);
		if (args->bio == NULL)
			goto confused;
		args->bio->bi_iter.bi_sector = first_block << (blkbits - 9);
	}

	length = first_hole << blkbits;
	if (!bio_add_folio(args->bio, folio, length, 0)) {
		args->bio = mpage_bio_submit_read(args->bio);
		goto alloc_new;
	}

	relative_block = block_in_file - args->first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_folio))
		args->bio = mpage_bio_submit_read(args->bio);
	else
		args->last_block_in_bio = first_block + blocks_per_folio - 1;
out:
	return args->bio;

confused:
	if (args->bio)
		args->bio = mpage_bio_submit_read(args->bio);
	if (!folio_test_uptodate(folio))
		block_read_full_folio(folio, args->get_block);
	else
		folio_unlock(folio);
	goto out;
}

/**
 * mpage_readahead - start reads against pages
 * @rac: Describes which pages to read.
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
{
	struct folio *folio;
	struct mpage_readpage_args args = {
		.get_block = get_block,
		.is_readahead = true,
	};

	while ((folio = readahead_folio(rac))) {
		prefetchw(&folio->flags);
		args.folio = folio;
		args.nr_pages = readahead_count(rac);
		args.bio = do_mpage_readpage(&args);
	}
	if (args.bio)
		mpage_bio_submit_read(args.bio);
}
EXPORT_SYMBOL(mpage_readahead);
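
/*
 * Typical wiring, sketched after ext2 (illustrative, not part of this
 * file): a block-based filesystem points its address_space_operations at
 * a small wrapper that supplies its own get_block:
 *
 *	static void ext2_readahead(struct readahead_control *rac)
 *	{
 *		mpage_readahead(rac, ext2_get_block);
 *	}
 *
 *	const struct address_space_operations ext2_aops = {
 *		...
 *		.readahead	= ext2_readahead,
 *	};
 */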

/*
 * This isn't called much at all
 */
int mpage_read_folio(struct folio *folio, get_block_t get_block)
{
	struct mpage_readpage_args args = {
		.folio = folio,
		.nr_pages = folio_nr_pages(folio),
		.get_block = get_block,
	};

	args.bio = do_mpage_readpage(&args);
	if (args.bio)
		mpage_bio_submit_read(args.bio);
	return 0;
}
EXPORT_SYMBOL(mpage_read_folio);
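
/*
 * The single-folio read path is wired up the same way; again a sketch
 * after ext2:
 *
 *	static int ext2_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return mpage_read_folio(folio, ext2_get_block);
 *	}
 */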

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */

struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct folio *folio, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct buffer_head *bh, *head = folio_buffers(folio);

	if (!head)
		return;
	bh = head;

	do {
		if (buffer_counter++ == first_unmapped)
			break;
		clear_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * we cannot drop the bh if the page is not uptodate or a concurrent
	 * read_folio would fail to serialize with the bh and it would read from
	 * disk before we reach the platter.
	 */
	if (buffer_heads_over_limit && folio_test_uptodate(folio))
		try_to_free_buffers(folio);
}

static int mpage_write_folio(struct writeback_control *wbc, struct folio *folio,
		struct mpage_data *mpd)
{
	struct bio *bio = mpd->bio;
	struct address_space *mapping = folio->mapping;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t first_block;
	unsigned page_block;
	unsigned first_unmapped = blocks_per_folio;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	size_t length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;
	struct buffer_head *head = folio_buffers(folio);

	if (head) {
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * block_dirty_folio -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_folio)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_folio)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != first_block + page_block)
					goto confused;
			} else {
				first_block = bh->b_blocknr;
			}
			page_block++;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_folio().  If this address_space is also
		 * using mpage_readahead then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!folio_test_uptodate(folio));
	block_in_file = folio_pos(folio) >> blkbits;
	/*
	 * Whole page beyond EOF? Skip allocating blocks to avoid leaking
	 * space.
	 */
	if (block_in_file >= (i_size + (1 << blkbits) - 1) >> blkbits)
		goto page_is_mapped;
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_folio = folio;
	for (page_block = 0; page_block < blocks_per_folio; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (!buffer_mapped(&map_bh))
			goto confused;
		if (buffer_new(&map_bh))
			clean_bdev_bh_alias(&map_bh);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != first_block + page_block)
				goto confused;
		} else {
			first_block = map_bh.b_blocknr;
		}
		page_block++;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	/* Don't bother writing beyond EOF, truncate will discard the folio */
	if (folio_pos(folio) >= i_size)
		goto confused;
	length = folio_size(folio);
	if (folio_pos(folio) + length > i_size) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		length = i_size - folio_pos(folio);
		folio_zero_segment(folio, length, folio_size(folio));
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != first_block - 1)
		bio = mpage_bio_submit_write(bio);

alloc_new:
	if (bio == NULL) {
		bio = bio_alloc(bdev, BIO_MAX_VECS,
				REQ_OP_WRITE | wbc_to_write_flags(wbc),
				GFP_NOFS);
		bio->bi_iter.bi_sector = first_block << (blkbits - 9);
		wbc_init_bio(wbc, bio);
		bio->bi_write_hint = inode->i_write_hint;
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	wbc_account_cgroup_owner(wbc, folio, folio_size(folio));
	length = first_unmapped << blkbits;
	if (!bio_add_folio(bio, folio, length, 0)) {
		bio = mpage_bio_submit_write(bio);
		goto alloc_new;
	}

	clean_buffers(folio, first_unmapped);

	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (boundary || (first_unmapped != blocks_per_folio)) {
		bio = mpage_bio_submit_write(bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = first_block + blocks_per_folio - 1;
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit_write(bio);

	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	ret = block_write_full_folio(folio, wbc, mpd->get_block);
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct mpage_data mpd = {
		.get_block = get_block,
	};
	struct folio *folio = NULL;
	struct blk_plug plug;
	int error;

	blk_start_plug(&plug);
	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
		error = mpage_write_folio(wbc, folio, &mpd);
	if (mpd.bio)
		mpage_bio_submit_write(mpd.bio);
	blk_finish_plug(&plug);
	return error;
}
EXPORT_SYMBOL(mpage_writepages);
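
/*
 * And the write side, sketched after ext2: ->writepages delegates here
 * with the filesystem's allocating get_block (create == 1 on this path):
 *
 *	static int ext2_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, ext2_get_block);
 *	}
 */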