fs: convert block_write_full_page to block_write_full_folio
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 15 Dec 2023 20:02:44 +0000 (20:02 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 29 Dec 2023 19:58:35 +0000 (11:58 -0800)
Convert the function to be compatible with writepage_t so that it can be
passed to write_cache_pages() by blkdev.  This removes a call to
compound_head().  We can also remove the function export as both callers
are built-in.

Link: https://lkml.kernel.org/r/20231215200245.748418-14-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
block/fops.c
fs/buffer.c
fs/ext4/page-io.c
fs/gfs2/aops.c
fs/mpage.c
fs/ntfs/aops.c
fs/ocfs2/alloc.c
fs/ocfs2/file.c
include/linux/buffer_head.h

index 0bdad1e8d51457c2fccfc2e99999a6aff3e4bc87..0cf8cf72cdfa108926ae8fc7f53bce05a3225058 100644 (file)
@@ -410,9 +410,24 @@ static int blkdev_get_block(struct inode *inode, sector_t iblock,
        return 0;
 }
 
-static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
+/*
+ * We cannot call mpage_writepages() as it does not take the buffer lock.
+ * We must use block_write_full_folio() directly which holds the buffer
+ * lock.  The buffer lock provides the synchronisation with writeback
+ * that filesystems rely on when they use the blockdev's mapping.
+ */
+static int blkdev_writepages(struct address_space *mapping,
+               struct writeback_control *wbc)
 {
-       return block_write_full_page(page, blkdev_get_block, wbc);
+       struct blk_plug plug;
+       int err;
+
+       blk_start_plug(&plug);
+       err = write_cache_pages(mapping, wbc, block_write_full_folio,
+                       blkdev_get_block);
+       blk_finish_plug(&plug);
+
+       return err;
 }
 
 static int blkdev_read_folio(struct file *file, struct folio *folio)
@@ -449,7 +464,7 @@ const struct address_space_operations def_blk_aops = {
        .invalidate_folio = block_invalidate_folio,
        .read_folio     = blkdev_read_folio,
        .readahead      = blkdev_readahead,
-       .writepage      = blkdev_writepage,
+       .writepages     = blkdev_writepages,
        .write_begin    = blkdev_write_begin,
        .write_end      = blkdev_write_end,
        .migrate_folio  = buffer_migrate_folio_norefs,
index 3a8c8322ed2862fee818a0d84c63b77ac57a341e..c838b4a31009210c37672be0fb20c6062d5d86af 100644 (file)
@@ -372,7 +372,7 @@ static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
 }
 
 /*
- * Completion handler for block_write_full_page() - pages which are unlocked
+ * Completion handler for block_write_full_folio() - pages which are unlocked
  * during I/O, and which have PageWriteback cleared upon I/O completion.
  */
 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
@@ -1771,18 +1771,18 @@ static struct buffer_head *folio_create_buffers(struct folio *folio,
  */
 
 /*
- * While block_write_full_page is writing back the dirty buffers under
+ * While block_write_full_folio is writing back the dirty buffers under
  * the page lock, whoever dirtied the buffers may decide to clean them
  * again at any time.  We handle that by only looking at the buffer
  * state inside lock_buffer().
  *
- * If block_write_full_page() is called for regular writeback
+ * If block_write_full_folio() is called for regular writeback
  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
  * locked buffer.   This only can happen if someone has written the buffer
  * directly, with submit_bh().  At the address_space level PageWriteback
  * prevents this contention from occurring.
  *
- * If block_write_full_page() is called with wbc->sync_mode ==
+ * If block_write_full_folio() is called with wbc->sync_mode ==
  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
  * causes the writes to be flagged as synchronous writes.
  */
@@ -1829,7 +1829,7 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
                         * truncate in progress.
                         */
                        /*
-                        * The buffer was zeroed by block_write_full_page()
+                        * The buffer was zeroed by block_write_full_folio()
                         */
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
@@ -2696,10 +2696,9 @@ EXPORT_SYMBOL(block_truncate_page);
 /*
  * The generic ->writepage function for buffer-backed address_spaces
  */
-int block_write_full_page(struct page *page, get_block_t *get_block,
-                       struct writeback_control *wbc)
+int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
+               void *get_block)
 {
-       struct folio *folio = page_folio(page);
        struct inode * const inode = folio->mapping->host;
        loff_t i_size = i_size_read(inode);
 
@@ -2726,7 +2725,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
        return __block_write_full_folio(inode, folio, get_block, wbc,
                        end_buffer_async_write);
 }
-EXPORT_SYMBOL(block_write_full_page);
 
 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
                            get_block_t *get_block)
index dfdd7e5cf0389088a70ed549ce744496dcd83830..312bc68133574db9343098453827c1c132f83424 100644 (file)
@@ -444,7 +444,7 @@ int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
        folio_clear_error(folio);
 
        /*
-        * Comments copied from block_write_full_page:
+        * Comments copied from block_write_full_folio:
         *
         * The folio straddles i_size.  It must be zeroed out on each and every
         * writepage invocation because it may be mmapped.  "A file is mapped
index 5cffb079b87cbf0c2c77a5fa69406969ed56fd63..f986cd032b7699b7620902eb557e6daa4bf3cd06 100644 (file)
@@ -82,11 +82,11 @@ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
 }
 
 /**
- * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_page
+ * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
  * @folio: The folio to write
  * @wbc: The writeback control
  *
- * This is the same as calling block_write_full_page, but it also
+ * This is the same as calling block_write_full_folio, but it also
  * writes pages outside of i_size
  */
 static int gfs2_write_jdata_folio(struct folio *folio,
index d4963f3d8051c77915f7490f0c20d299bae3d5ec..738882e0766d083743698916df5f77ce1e68dea7 100644 (file)
@@ -642,7 +642,7 @@ confused:
        /*
         * The caller has a ref on the inode, so *mapping is stable
         */
-       ret = block_write_full_page(&folio->page, mpd->get_block, wbc);
+       ret = block_write_full_folio(folio, wbc, mpd->get_block);
        mapping_set_error(mapping, ret);
 out:
        mpd->bio = bio;
index 70479ce915e8f166f46115b33990f37012a9907c..6c414957e2c24a62cb62a3d44d93073b9f3ca2ec 100644 (file)
@@ -1304,7 +1304,7 @@ done:
  * page cleaned.  The VM has already locked the page and marked it clean.
  *
  * For non-resident attributes, ntfs_writepage() writes the @page by calling
- * the ntfs version of the generic block_write_full_page() function,
+ * the ntfs version of the generic block_write_full_folio() function,
  * ntfs_write_block(), which in turn if necessary creates and writes the
  * buffers associated with the page asynchronously.
  *
@@ -1314,7 +1314,7 @@ done:
  * vfs inode dirty code path for the inode the mft record belongs to or via the
  * vm page dirty code path for the page the mft record is in.
  *
- * Based on ntfs_read_folio() and fs/buffer.c::block_write_full_page().
+ * Based on ntfs_read_folio() and fs/buffer.c::block_write_full_folio().
  *
  * Return 0 on success and -errno on error.
  */
index 91b32b2377acc9336cbe03a5c7be2d4cfe4e9cda..ea9127ba3208447eb4e08f2abf069cf72b9ea72a 100644 (file)
@@ -6934,7 +6934,7 @@ static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
  * nonzero data on subsequent file extends.
  *
  * We need to call this before i_size is updated on the inode because
- * otherwise block_write_full_page() will skip writeout of pages past
+ * otherwise block_write_full_folio() will skip writeout of pages past
  * i_size.
  */
 int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
index 94e2a12444420e8678cf24e16fc91d45f0787668..8b6d15010703b30a8b1503e52aefbee35166a319 100644 (file)
@@ -818,7 +818,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
        /*
         * fs-writeback will release the dirty pages without page lock
         * whose offset are over inode size, the release happens at
-        * block_write_full_page().
+        * block_write_full_folio().
         */
        i_size_write(inode, abs_to);
        inode->i_blocks = ocfs2_inode_sector_count(inode);
index 94f6161eb45eba42bf96ecabc638740c20421af5..396b2adf24bf1a20533f6217bea946f82603af7b 100644 (file)
@@ -252,8 +252,8 @@ void __bh_read_batch(int nr, struct buffer_head *bhs[],
  * address_spaces.
  */
 void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
-int block_write_full_page(struct page *page, get_block_t *get_block,
-                               struct writeback_control *wbc);
+int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
+               void *get_block);
 int __block_write_full_folio(struct inode *inode, struct folio *folio,
                        get_block_t *get_block, struct writeback_control *wbc,
                        bh_end_io_t *handler);