fs: Convert __set_page_dirty_buffers to block_dirty_folio
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 9 Feb 2022 20:22:12 +0000 (20:22 +0000)
Committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 16 Mar 2022 17:37:04 +0000 (13:37 -0400)
Convert all callers; mostly this is just changing the aops to point
at it, but a few implementations need a little more work.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
32 files changed:
block/fops.c
fs/adfs/inode.c
fs/affs/file.c
fs/bfs/file.c
fs/buffer.c
fs/ecryptfs/mmap.c
fs/exfat/inode.c
fs/ext2/inode.c
fs/ext4/inode.c
fs/fat/inode.c
fs/gfs2/aops.c
fs/gfs2/meta_io.c
fs/hfs/inode.c
fs/hfsplus/inode.c
fs/hpfs/file.c
fs/jfs/inode.c
fs/minix/inode.c
fs/mpage.c
fs/nilfs2/mdt.c
fs/ntfs/aops.c
fs/ntfs3/inode.c
fs/ocfs2/aops.c
fs/omfs/file.c
fs/reiserfs/inode.c
fs/sysv/itree.c
fs/udf/file.c
fs/udf/inode.c
fs/ufs/inode.c
include/linux/buffer_head.h
mm/filemap.c
mm/page-writeback.c
mm/rmap.c

index 8ce1dccd15b931f469817d78818ca1ebd67c26a9..796a78fd1583996fc0ee5f8de0f598c4444a549d 100644 (file)
@@ -429,7 +429,7 @@ static int blkdev_writepages(struct address_space *mapping,
 }
 
 const struct address_space_operations def_blk_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage       = blkdev_readpage,
        .readahead      = blkdev_readahead,
index 5c423254895ae51fbcc25fb9cda4c226e60eafbd..561bc748c04a0f410ac1a3539cae428e6e037aa4 100644 (file)
@@ -73,7 +73,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
 }
 
 static const struct address_space_operations adfs_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage       = adfs_readpage,
        .writepage      = adfs_writepage,
index 6d4921f97162f1b797ba5765b50c2de970878aa5..b3f81d84ff4cf70e2e5696bcc1f391c7db0c0f87 100644 (file)
@@ -453,7 +453,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
 }
 
 const struct address_space_operations affs_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage = affs_readpage,
        .writepage = affs_writepage,
@@ -835,7 +835,7 @@ err_bh:
 }
 
 const struct address_space_operations affs_aops_ofs = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage = affs_readpage_ofs,
        //.writepage = affs_writepage_ofs,
index 2e42b82edb58953322ab2f804c87debcd5b3bd69..03139344568f57a7a47b33a755c3304e56cad895 100644 (file)
@@ -188,7 +188,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
 }
 
 const struct address_space_operations bfs_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage       = bfs_readpage,
        .writepage      = bfs_writepage,
index 5fe02e5a9807944121e05ccedd053d6d3e03a1af..28b9739b719befdf1bb2bb80894b81d3b714f95d 100644 (file)
@@ -613,17 +613,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
  * address_space though.
  */
-int __set_page_dirty_buffers(struct page *page)
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-       int newly_dirty;
-       struct address_space *mapping = page_mapping(page);
-
-       if (unlikely(!mapping))
-               return !TestSetPageDirty(page);
+       struct buffer_head *head;
+       bool newly_dirty;
 
        spin_lock(&mapping->private_lock);
-       if (page_has_buffers(page)) {
-               struct buffer_head *head = page_buffers(page);
+       head = folio_buffers(folio);
+       if (head) {
                struct buffer_head *bh = head;
 
                do {
@@ -635,21 +632,21 @@ int __set_page_dirty_buffers(struct page *page)
         * Lock out page's memcg migration to keep PageDirty
         * synchronized with per-memcg dirty page counters.
         */
-       lock_page_memcg(page);
-       newly_dirty = !TestSetPageDirty(page);
+       folio_memcg_lock(folio);
+       newly_dirty = !folio_test_set_dirty(folio);
        spin_unlock(&mapping->private_lock);
 
        if (newly_dirty)
-               __set_page_dirty(page, mapping, 1);
+               __folio_mark_dirty(folio, mapping, 1);
 
-       unlock_page_memcg(page);
+       folio_memcg_unlock(folio);
 
        if (newly_dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
        return newly_dirty;
 }
-EXPORT_SYMBOL(__set_page_dirty_buffers);
+EXPORT_SYMBOL(block_dirty_folio);
 
 /*
  * Write out and wait upon a list of buffers.
@@ -1548,7 +1545,7 @@ EXPORT_SYMBOL(block_invalidate_folio);
 
 /*
  * We attach and possibly dirty the buffers atomically wrt
- * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
+ * block_dirty_folio() via private_lock.  try_to_free_buffers
  * is already excluded via the page lock.
  */
 void create_empty_buffers(struct page *page,
@@ -1723,12 +1720,12 @@ int __block_write_full_page(struct inode *inode, struct page *page,
                                        (1 << BH_Dirty)|(1 << BH_Uptodate));
 
        /*
-        * Be very careful.  We have no exclusion from __set_page_dirty_buffers
+        * Be very careful.  We have no exclusion from block_dirty_folio
         * here, and the (potentially unmapped) buffers may become dirty at
         * any time.  If a buffer becomes dirty here after we've inspected it
         * then we just miss that fact, and the page stays dirty.
         *
-        * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
+        * Buffers outside i_size may be dirtied by block_dirty_folio;
         * handle that here by just cleaning them.
         */
 
@@ -3182,7 +3179,7 @@ EXPORT_SYMBOL(sync_dirty_buffer);
  *
  * The same applies to regular filesystem pages: if all the buffers are
  * clean then we set the page clean and proceed.  To do that, we require
- * total exclusion from __set_page_dirty_buffers().  That is obtained with
+ * total exclusion from block_dirty_folio().  That is obtained with
  * private_lock.
  *
  * try_to_free_buffers() is non-blocking.
@@ -3249,7 +3246,7 @@ int try_to_free_buffers(struct page *page)
         * the page also.
         *
         * private_lock must be held over this entire operation in order
-        * to synchronise against __set_page_dirty_buffers and prevent the
+        * to synchronise against block_dirty_folio and prevent the
         * dirty bit from being lost.
         */
        if (ret)
index bf7f35b375b7969b5463676e43a814d6e7be5953..9aabcb2f52e9c721f2a6565a3d5126293b8733fa 100644 (file)
@@ -545,7 +545,7 @@ const struct address_space_operations ecryptfs_aops = {
         * feedback.
         */
 #ifdef CONFIG_BLOCK
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
 #endif
        .writepage = ecryptfs_writepage,
index 5ed471eb973b48b609b48c2be47c7c10305a77de..fc0ea16848803d5df36d0039a8cac18be1919226 100644 (file)
@@ -490,7 +490,7 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from)
 }
 
 static const struct address_space_operations exfat_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage       = exfat_readpage,
        .readahead      = exfat_readahead,
index 9b579ee56eafcc0e8d2a85108423d60e8c0f1caa..d9452a0511988251341883544ee18603bb034d04 100644 (file)
@@ -967,8 +967,8 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc
 }
 
 const struct address_space_operations ext2_aops = {
-       .set_page_dirty         = __set_page_dirty_buffers,
-       .invalidate_folio = block_invalidate_folio,
+       .dirty_folio            = block_dirty_folio,
+       .invalidate_folio       = block_invalidate_folio,
        .readpage               = ext2_readpage,
        .readahead              = ext2_readahead,
        .writepage              = ext2_writepage,
@@ -983,8 +983,8 @@ const struct address_space_operations ext2_aops = {
 };
 
 const struct address_space_operations ext2_nobh_aops = {
-       .set_page_dirty         = __set_page_dirty_buffers,
-       .invalidate_folio = block_invalidate_folio,
+       .dirty_folio            = block_dirty_folio,
+       .invalidate_folio       = block_invalidate_folio,
        .readpage               = ext2_readpage,
        .readahead              = ext2_readahead,
        .writepage              = ext2_nobh_writepage,
index c48dbbf0e9b2ad957bfa5014fe306d65fafde2b0..4c34104a94f00b47615c10a9ada595edbbc0fa14 100644 (file)
@@ -3560,11 +3560,11 @@ static bool ext4_journalled_dirty_folio(struct address_space *mapping,
        return filemap_dirty_folio(mapping, folio);
 }
 
-static int ext4_set_page_dirty(struct page *page)
+static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-       WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
-       WARN_ON_ONCE(!page_has_buffers(page));
-       return __set_page_dirty_buffers(page);
+       WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio));
+       WARN_ON_ONCE(!folio_buffers(folio));
+       return block_dirty_folio(mapping, folio);
 }
 
 static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
@@ -3581,7 +3581,7 @@ static const struct address_space_operations ext4_aops = {
        .writepages             = ext4_writepages,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_write_end,
-       .set_page_dirty         = ext4_set_page_dirty,
+       .dirty_folio            = ext4_dirty_folio,
        .bmap                   = ext4_bmap,
        .invalidate_folio       = ext4_invalidate_folio,
        .releasepage            = ext4_releasepage,
@@ -3616,7 +3616,7 @@ static const struct address_space_operations ext4_da_aops = {
        .writepages             = ext4_writepages,
        .write_begin            = ext4_da_write_begin,
        .write_end              = ext4_da_write_end,
-       .set_page_dirty         = ext4_set_page_dirty,
+       .dirty_folio            = ext4_dirty_folio,
        .bmap                   = ext4_bmap,
        .invalidate_folio       = ext4_invalidate_folio,
        .releasepage            = ext4_releasepage,
index 1e2f1e24a073cfd9c285e511ba2ce3524fa8ddf5..86957dd07bda08c571d6b584ca4721a1479d0978 100644 (file)
@@ -342,7 +342,7 @@ int fat_block_truncate_page(struct inode *inode, loff_t from)
 }
 
 static const struct address_space_operations fat_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage       = fat_readpage,
        .readahead      = fat_readahead,
index 7c096a75d70339d51cadf609262133e3399a3f72..72c9f31ce72446c81683cbcfa7d8b0f4aa502604 100644 (file)
@@ -606,18 +606,12 @@ out:
        gfs2_trans_end(sdp);
 }
 
-/**
- * jdata_set_page_dirty - Page dirtying function
- * @page: The page to dirty
- *
- * Returns: 1 if it dirtyed the page, or 0 otherwise
- */
-static int jdata_set_page_dirty(struct page *page)
+static bool jdata_dirty_folio(struct address_space *mapping,
+               struct folio *folio)
 {
        if (current->journal_info)
-               SetPageChecked(page);
-       return __set_page_dirty_buffers(page);
+               folio_set_checked(folio);
+       return block_dirty_folio(mapping, folio);
 }
 
 /**
@@ -795,7 +789,7 @@ static const struct address_space_operations gfs2_jdata_aops = {
        .writepages = gfs2_jdata_writepages,
        .readpage = gfs2_readpage,
        .readahead = gfs2_readahead,
-       .set_page_dirty = jdata_set_page_dirty,
+       .dirty_folio = jdata_dirty_folio,
        .bmap = gfs2_bmap,
        .invalidate_folio = gfs2_invalidate_folio,
        .releasepage = gfs2_releasepage,
index d23c8b0354473b99515c41c9f8b879107c1b9af4..ac4d27ccd87dae1f1c53d032fb844c4a3cd4f487 100644 (file)
@@ -89,14 +89,14 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 }
 
 const struct address_space_operations gfs2_meta_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .writepage = gfs2_aspace_writepage,
        .releasepage = gfs2_releasepage,
 };
 
 const struct address_space_operations gfs2_rgrp_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .writepage = gfs2_aspace_writepage,
        .releasepage = gfs2_releasepage,
index 029d1869a22481bba9efde709a199995e1e0a9d6..55f45e9b4930e3cfd1968b6a1fd6d83018cb3ec6 100644 (file)
@@ -159,7 +159,7 @@ static int hfs_writepages(struct address_space *mapping,
 }
 
 const struct address_space_operations hfs_btree_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage       = hfs_readpage,
        .writepage      = hfs_writepage,
@@ -170,7 +170,7 @@ const struct address_space_operations hfs_btree_aops = {
 };
 
 const struct address_space_operations hfs_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage       = hfs_readpage,
        .writepage      = hfs_writepage,
index a91b9b5e92a85d8ba59684528e86c0e2335903ec..446a816aa8e1e2e20a497441b5cf6b7b63e7e8e6 100644 (file)
@@ -156,7 +156,7 @@ static int hfsplus_writepages(struct address_space *mapping,
 }
 
 const struct address_space_operations hfsplus_btree_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage       = hfsplus_readpage,
        .writepage      = hfsplus_writepage,
@@ -167,7 +167,7 @@ const struct address_space_operations hfsplus_btree_aops = {
 };
 
 const struct address_space_operations hfsplus_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage       = hfsplus_readpage,
        .writepage      = hfsplus_writepage,
index cf68f5e76dddac26a34f167d4f0bd1d5d0b599a4..99493a23c5d0c2ecfb48697d756a28fb2177fea9 100644 (file)
@@ -245,7 +245,7 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 }
 
 const struct address_space_operations hpfs_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage = hpfs_readpage,
        .writepage = hpfs_writepage,
index 3950b3d610a0376f42415af3dc73c229a684f892..27be2e8ba237ec648b66024f83f77f629e0a5717 100644 (file)
@@ -357,7 +357,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 }
 
 const struct address_space_operations jfs_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage       = jfs_readpage,
        .readahead      = jfs_readahead,
index 2295804d1893b3157589d930890a9107648afd87..1e41fba68dcffd2418de7414101dee7688ba6143 100644 (file)
@@ -442,7 +442,7 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
 }
 
 static const struct address_space_operations minix_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage = minix_readpage,
        .writepage = minix_writepage,
index 87f5cfef6caa71c7333cca396563605700882f23..571862da9f561b1a4a74223bfcccfd6a3296c6dc 100644 (file)
@@ -504,7 +504,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
                        if (!buffer_mapped(bh)) {
                                /*
                                 * unmapped dirty buffers are created by
-                                * __set_page_dirty_buffers -> mmapped data
+                                * block_dirty_folio -> mmapped data
                                 */
                                if (buffer_dirty(bh))
                                        goto confused;
index 72adca629bc9ca3f821f15985eb10cc2915c16f2..78db33decd72fb04db41f4f515c6b87a54d1158a 100644 (file)
@@ -434,8 +434,8 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
 
 
 static const struct address_space_operations def_mdt_aops = {
-       .set_page_dirty         = __set_page_dirty_buffers,
-       .invalidate_folio = block_invalidate_folio,
+       .dirty_folio            = block_dirty_folio,
+       .invalidate_folio       = block_invalidate_folio,
        .writepage              = nilfs_mdt_write_page,
 };
 
index dd71f6ac0272158aa8290ed1bbf89cca9530dbb7..d154dcfe06afdd5f4dcd4c449ab132722ba252ec 100644 (file)
@@ -593,12 +593,12 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
        iblock = initialized_size >> blocksize_bits;
 
        /*
-        * Be very careful.  We have no exclusion from __set_page_dirty_buffers
+        * Be very careful.  We have no exclusion from block_dirty_folio
         * here, and the (potentially unmapped) buffers may become dirty at
         * any time.  If a buffer becomes dirty here after we've inspected it
         * then we just miss that fact, and the page stays dirty.
         *
-        * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
+        * Buffers outside i_size may be dirtied by block_dirty_folio;
         * handle that here by just cleaning them.
         */
 
@@ -653,7 +653,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
                                // Update initialized size in the attribute and
                                // in the inode.
                                // Again, for each page do:
-                               //      __set_page_dirty_buffers();
+                               //      block_dirty_folio();
                                // put_page()
                                // We don't need to wait on the writes.
                                // Update iblock.
@@ -1654,7 +1654,7 @@ const struct address_space_operations ntfs_normal_aops = {
        .readpage       = ntfs_readpage,
 #ifdef NTFS_RW
        .writepage      = ntfs_writepage,
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
 #endif /* NTFS_RW */
        .bmap           = ntfs_bmap,
        .migratepage    = buffer_migrate_page,
@@ -1669,7 +1669,7 @@ const struct address_space_operations ntfs_compressed_aops = {
        .readpage       = ntfs_readpage,
 #ifdef NTFS_RW
        .writepage      = ntfs_writepage,
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
 #endif /* NTFS_RW */
        .migratepage    = buffer_migrate_page,
        .is_partially_uptodate = block_is_partially_uptodate,
@@ -1746,7 +1746,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
                set_buffer_dirty(bh);
        } while ((bh = bh->b_this_page) != head);
        spin_unlock(&mapping->private_lock);
-       __set_page_dirty_nobuffers(page);
+       block_dirty_folio(mapping, page_folio(page));
        if (unlikely(buffers_to_free)) {
                do {
                        bh = buffers_to_free->b_this_page;
index a87ab3ad3cd386397e9c210c691824fe83b647a9..9eab11e3b03415528f9b7beedcea58a747c43553 100644 (file)
@@ -1950,7 +1950,7 @@ const struct address_space_operations ntfs_aops = {
        .write_end      = ntfs_write_end,
        .direct_IO      = ntfs_direct_IO,
        .bmap           = ntfs_bmap,
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
 };
 
 const struct address_space_operations ntfs_aops_cmpr = {
index b274061e22a7b83e974429e01aa198d0a1d75797..fc890ca2e17ee57ba9716d2b4126f1780570dbc9 100644 (file)
@@ -2453,7 +2453,7 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 }
 
 const struct address_space_operations ocfs2_aops = {
-       .set_page_dirty         = __set_page_dirty_buffers,
+       .dirty_folio            = block_dirty_folio,
        .readpage               = ocfs2_readpage,
        .readahead              = ocfs2_readahead,
        .writepage              = ocfs2_writepage,
index 139d6a21dca155bf97ee974b80dbb22d98c77e91..3f297b541713282fa157199f89fd77719f0b80e5 100644 (file)
@@ -372,7 +372,7 @@ const struct inode_operations omfs_file_inops = {
 };
 
 const struct address_space_operations omfs_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage = omfs_readpage,
        .readahead = omfs_readahead,
index f7fa70b419d27adf44e40487c9b2c04ada3b95cd..e4221fa85ea215e71b11d1afb818ee755a6cb96f 100644 (file)
@@ -3201,14 +3201,14 @@ out:
        return;
 }
 
-static int reiserfs_set_page_dirty(struct page *page)
+static bool reiserfs_dirty_folio(struct address_space *mapping,
+               struct folio *folio)
 {
-       struct inode *inode = page->mapping->host;
-       if (reiserfs_file_data_log(inode)) {
-               SetPageChecked(page);
-               return __set_page_dirty_nobuffers(page);
+       if (reiserfs_file_data_log(mapping->host)) {
+               folio_set_checked(folio);
+               return filemap_dirty_folio(mapping, folio);
        }
-       return __set_page_dirty_buffers(page);
+       return block_dirty_folio(mapping, folio);
 }
 
 /*
@@ -3435,5 +3435,5 @@ const struct address_space_operations reiserfs_address_space_operations = {
        .write_end = reiserfs_write_end,
        .bmap = reiserfs_aop_bmap,
        .direct_IO = reiserfs_direct_IO,
-       .set_page_dirty = reiserfs_set_page_dirty,
+       .dirty_folio = reiserfs_dirty_folio,
 };
index d39984a1d4d3ce3ca5db1775fdfd4cd70d5e3773..409ab5e178031cf9229f704da8b5350871a00a29 100644 (file)
@@ -495,7 +495,7 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
 }
 
 const struct address_space_operations sysv_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage = sysv_readpage,
        .writepage = sysv_writepage,
index a91011a7bb88cd1be7d829085ffce81365d45fdb..0f6bf2504437ba2ee5cb56d41888c3ba508365b5 100644 (file)
@@ -125,7 +125,7 @@ static int udf_adinicb_write_end(struct file *file, struct address_space *mappin
 }
 
 const struct address_space_operations udf_adinicb_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage       = udf_adinicb_readpage,
        .writepage      = udf_adinicb_writepage,
index ab98c7aaf9f9415a97c56d6ed080f7d18f8c86ea..ca4fa710e562f22f5db95dbba0c3214fe7c2b5ef 100644 (file)
@@ -235,7 +235,7 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block)
 }
 
 const struct address_space_operations udf_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage       = udf_readpage,
        .readahead      = udf_readahead,
index 2d005788c24d553a3de0e07283f466805c192328..d0dda01620f0d7ca5675cde7139eccbe9cfd8da1 100644 (file)
@@ -526,7 +526,7 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
 }
 
 const struct address_space_operations ufs_aops = {
-       .set_page_dirty = __set_page_dirty_buffers,
+       .dirty_folio = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .readpage = ufs_readpage,
        .writepage = ufs_writepage,
index 9ee9d003d73625d4e7ebb08c13060a4c5752aef9..bcb4fe9b8575c21cd3f91a2e79c2abf8e203fb38 100644 (file)
@@ -397,7 +397,7 @@ __bread(struct block_device *bdev, sector_t block, unsigned size)
        return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
 }
 
-extern int __set_page_dirty_buffers(struct page *page);
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
 
 #else /* CONFIG_BLOCK */
 
index 9639b844dd31997298e1673c4500bbb3051a7a58..bb4e91bf5492eaeeb0fbbb3195050078faa771e5 100644 (file)
@@ -72,7 +72,7 @@
  * Lock ordering:
  *
  *  ->i_mmap_rwsem             (truncate_pagecache)
- *    ->private_lock           (__free_pte->__set_page_dirty_buffers)
+ *    ->private_lock           (__free_pte->block_dirty_folio)
  *      ->swap_lock            (exclusive_swap_page, others)
  *        ->i_pages lock
  *
  *    ->memcg->move_lock       (page_remove_rmap->lock_page_memcg)
  *    bdi.wb->list_lock                (zap_pte_range->set_page_dirty)
  *    ->inode->i_lock          (zap_pte_range->set_page_dirty)
- *    ->private_lock           (zap_pte_range->__set_page_dirty_buffers)
+ *    ->private_lock           (zap_pte_range->block_dirty_folio)
  *
  * ->i_mmap_rwsem
  *   ->tasklist_lock            (memory_failure, collect_procs_ao)
index 27a87ae4502c777123e87cd1a9c9103e918f7a62..e890db239fae0b1908299d8732fc96c083639585 100644 (file)
@@ -2530,7 +2530,7 @@ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
  * This is also sometimes used by filesystems which use buffer_heads when
  * a single buffer is being dirtied: we want to set the folio dirty in
  * that case, but not all the buffers.  This is a "bottom-up" dirtying,
- * whereas __set_page_dirty_buffers() is a "top-down" dirtying.
+ * whereas block_dirty_folio() is a "top-down" dirtying.
  *
  * The caller must ensure this doesn't race with truncation.  Most will
  * simply hold the folio lock, but e.g. zap_pte_range() calls with the
index 6a1e8c7f62136110da1aaadf398524424d41ed2b..4f3391fa4ca90fa9cdf4d7b0bf1c1bfa94390b1e 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -31,8 +31,8 @@
  *               mm->page_table_lock or pte_lock
  *                 swap_lock (in swap_duplicate, swap_info_get)
  *                   mmlist_lock (in mmput, drain_mmlist and others)
- *                   mapping->private_lock (in __set_page_dirty_buffers)
- *                     lock_page_memcg move_lock (in __set_page_dirty_buffers)
+ *                   mapping->private_lock (in block_dirty_folio)
+ *                     folio_memcg_lock move_lock (in block_dirty_folio)
  *                       i_pages lock (widely used)
  *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
  *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)