mm/writeback: Add folio_start_writeback()
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Sat, 24 Apr 2021 16:00:48 +0000 (12:00 -0400)
Commit:     Matthew Wilcox (Oracle) <willy@infradead.org>
CommitDate: Mon, 18 Oct 2021 11:49:39 +0000 (07:49 -0400)
Rename set_page_writeback() to folio_start_writeback() to match
folio_end_writeback().  Do not bother with wrappers that return void;
callers are perfectly capable of ignoring return values.

Add wrappers for set_page_writeback(), set_page_writeback_keepwrite() and
test_set_page_writeback() for compatibility with existing filesystems.
The main advantage of this patch is getting the statistics right,
although it does eliminate a couple of calls to compound_head().
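
As an aside (not part of the patch itself), a minimal sketch of how a
converted caller and an unconverted caller might use the new interface;
the example_* functions below are hypothetical:

    #include <linux/page-flags.h>

    /* Converted caller: operate on the folio directly. */
    static void example_start_writeback(struct folio *folio)
    {
            /*
             * The return value is the old flag state: true if the folio
             * was already under writeback.  Callers that do not care
             * can simply ignore it.
             */
            folio_start_writeback(folio);
    }

    /* Unconverted caller: the compat wrapper does the page-to-folio step. */
    static void example_start_writeback_compat(struct page *page)
    {
            /* Equivalent to folio_start_writeback(page_folio(page)). */
            set_page_writeback(page);
    }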

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
include/linux/page-flags.h
mm/folio-compat.c
mm/page-writeback.c

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 3aebb2060a2632aa3f8b7caf916a22459dcaacb3..a68af80649a403be1087712cda98e1b3e415dede 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -657,21 +657,22 @@ static __always_inline void SetPageUptodate(struct page *page)
 
 CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
 
-int __test_set_page_writeback(struct page *page, bool keep_write);
+bool __folio_start_writeback(struct folio *folio, bool keep_write);
+bool set_page_writeback(struct page *page);
 
-#define test_set_page_writeback(page)                  \
-       __test_set_page_writeback(page, false)
-#define test_set_page_writeback_keepwrite(page)        \
-       __test_set_page_writeback(page, true)
+#define folio_start_writeback(folio)                   \
+       __folio_start_writeback(folio, false)
+#define folio_start_writeback_keepwrite(folio) \
+       __folio_start_writeback(folio, true)
 
-static inline void set_page_writeback(struct page *page)
+static inline void set_page_writeback_keepwrite(struct page *page)
 {
-       test_set_page_writeback(page);
+       folio_start_writeback_keepwrite(page_folio(page));
 }
 
-static inline void set_page_writeback_keepwrite(struct page *page)
+static inline bool test_set_page_writeback(struct page *page)
 {
-       test_set_page_writeback_keepwrite(page);
+       return set_page_writeback(page);
 }
 
 __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 2ccd8f213fc4dabafe7603b08f5d5d51c2a86e71..10ce5582d86987a266fb300d8000dfbc994fad4b 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -71,3 +71,9 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 }
 EXPORT_SYMBOL(migrate_page_copy);
 #endif
+
+bool set_page_writeback(struct page *page)
+{
+       return folio_start_writeback(page_folio(page));
+}
+EXPORT_SYMBOL(set_page_writeback);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 9ff9a33bced1994f57baa2ad8e332ea17fea441b..82938b0371030fbc4d7666cb47373fe8acdb2402 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2811,21 +2811,23 @@ bool __folio_end_writeback(struct folio *folio)
        return ret;
 }
 
-int __test_set_page_writeback(struct page *page, bool keep_write)
+bool __folio_start_writeback(struct folio *folio, bool keep_write)
 {
-       struct address_space *mapping = page_mapping(page);
-       int ret, access_ret;
+       long nr = folio_nr_pages(folio);
+       struct address_space *mapping = folio_mapping(folio);
+       bool ret;
+       int access_ret;
 
-       lock_page_memcg(page);
+       folio_memcg_lock(folio);
        if (mapping && mapping_use_writeback_tags(mapping)) {
-               XA_STATE(xas, &mapping->i_pages, page_index(page));
+               XA_STATE(xas, &mapping->i_pages, folio_index(folio));
                struct inode *inode = mapping->host;
                struct backing_dev_info *bdi = inode_to_bdi(inode);
                unsigned long flags;
 
                xas_lock_irqsave(&xas, flags);
                xas_load(&xas);
-               ret = TestSetPageWriteback(page);
+               ret = folio_test_set_writeback(folio);
                if (!ret) {
                        bool on_wblist;
 
@@ -2836,43 +2838,42 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
                        if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
                                struct bdi_writeback *wb = inode_to_wb(inode);
 
-                               inc_wb_stat(wb, WB_WRITEBACK);
+                               wb_stat_mod(wb, WB_WRITEBACK, nr);
                                if (!on_wblist)
                                        wb_inode_writeback_start(wb);
                        }
 
                        /*
-                        * We can come through here when swapping anonymous
-                        * pages, so we don't necessarily have an inode to track
-                        * for sync.
+                        * We can come through here when swapping
+                        * anonymous folios, so we don't necessarily
+                        * have an inode to track for sync.
                         */
                        if (mapping->host && !on_wblist)
                                sb_mark_inode_writeback(mapping->host);
                }
-               if (!PageDirty(page))
+               if (!folio_test_dirty(folio))
                        xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
                if (!keep_write)
                        xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
                xas_unlock_irqrestore(&xas, flags);
        } else {
-               ret = TestSetPageWriteback(page);
+               ret = folio_test_set_writeback(folio);
        }
        if (!ret) {
-               inc_lruvec_page_state(page, NR_WRITEBACK);
-               inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
+               lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
+               zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
        }
-       unlock_page_memcg(page);
-       access_ret = arch_make_page_accessible(page);
+       folio_memcg_unlock(folio);
+       access_ret = arch_make_folio_accessible(folio);
        /*
         * If writeback has been triggered on a page that cannot be made
         * accessible, it is too late to recover here.
         */
-       VM_BUG_ON_PAGE(access_ret != 0, page);
+       VM_BUG_ON_FOLIO(access_ret != 0, folio);
 
        return ret;
-
 }
-EXPORT_SYMBOL(__test_set_page_writeback);
+EXPORT_SYMBOL(__folio_start_writeback);
 
 /**
  * folio_wait_writeback - Wait for a folio to finish writeback.