mm: make __folio_end_writeback() return void
author		Matthew Wilcox (Oracle) <willy@infradead.org>
		Wed, 4 Oct 2023 16:53:16 +0000 (17:53 +0100)
committer	Andrew Morton <akpm@linux-foundation.org>
		Wed, 18 Oct 2023 21:34:17 +0000 (14:34 -0700)
Rather than checking the result of the test-and-clear, just assert that the
writeback bit is set at the start.  This won't catch every case (the check is
not atomic with the clear, and VM_BUG_ON_FOLIO() compiles away without
CONFIG_DEBUG_VM), but it's good enough, and it enables the next patch.
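
In folio_end_writeback() the change boils down to this (condensed from the
mm/filemap.c hunk below):

	/* Before: misuse detected via the atomic test-and-clear. */
	if (!__folio_end_writeback(folio))
		BUG();

	/* After: assert the invariant on entry; the clear no longer reports back. */
	VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
	...
	__folio_end_writeback(folio);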

Link: https://lkml.kernel.org/r/20231004165317.1061855-17-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/filemap.c
mm/internal.h
mm/page-writeback.c

diff --git a/mm/filemap.c b/mm/filemap.c
index e9c636f57777248779fe1ed2aedb1e1ecfc90e04..523d9e15b3b0871ea912c53ff809f10a83d1abaf 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1593,9 +1593,15 @@ EXPORT_SYMBOL(folio_wait_private_2_killable);
 /**
  * folio_end_writeback - End writeback against a folio.
  * @folio: The folio.
+ *
+ * The folio must actually be under writeback.
+ *
+ * Context: May be called from process or interrupt context.
  */
 void folio_end_writeback(struct folio *folio)
 {
+       VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
+
        /*
         * folio_test_clear_reclaim() could be used here but it is an
         * atomic operation and overkill in this particular case. Failing
@@ -1615,8 +1621,7 @@ void folio_end_writeback(struct folio *folio)
         * reused before the folio_wake().
         */
        folio_get(folio);
-       if (!__folio_end_writeback(folio))
-               BUG();
+       __folio_end_writeback(folio);
 
        smp_mb__after_atomic();
        folio_wake(folio, PG_writeback);
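
With this in place, a caller that ends writeback on a folio that is not (or no
longer) under writeback trips the assertion on entry in CONFIG_DEBUG_VM builds,
rather than hitting the BUG() after the failed test-and-clear.  A hypothetical
misuse:

	folio_end_writeback(folio);	/* clears PG_writeback, wakes waiters */
	folio_end_writeback(folio);	/* second call now fires VM_BUG_ON_FOLIO() */
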
diff --git a/mm/internal.h b/mm/internal.h
index 2b79da9deb64acfa80f6dd87cb1cab47ca368e33..ac4bbe2e2ca6a14542ff02c9a1caa4fcd3267827 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -105,7 +105,7 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat)
 
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
-bool __folio_end_writeback(struct folio *folio);
+void __folio_end_writeback(struct folio *folio);
 void deactivate_file_folio(struct folio *folio);
 void folio_activate(struct folio *folio);
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 001adbb4a180dc6ce97b545eac08a3cb2c56a649..a37e4c33d1ab739c980a423ec9a5d82649456f17 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2940,11 +2940,10 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
        spin_unlock_irqrestore(&wb->work_lock, flags);
 }
 
-bool __folio_end_writeback(struct folio *folio)
+void __folio_end_writeback(struct folio *folio)
 {
        long nr = folio_nr_pages(folio);
        struct address_space *mapping = folio_mapping(folio);
-       bool ret;
 
        folio_memcg_lock(folio);
        if (mapping && mapping_use_writeback_tags(mapping)) {
@@ -2953,19 +2952,16 @@ bool __folio_end_writeback(struct folio *folio)
                unsigned long flags;
 
                xa_lock_irqsave(&mapping->i_pages, flags);
-               ret = folio_test_clear_writeback(folio);
-               if (ret) {
-                       __xa_clear_mark(&mapping->i_pages, folio_index(folio),
-                                               PAGECACHE_TAG_WRITEBACK);
-                       if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
-                               struct bdi_writeback *wb = inode_to_wb(inode);
-
-                               wb_stat_mod(wb, WB_WRITEBACK, -nr);
-                               __wb_writeout_add(wb, nr);
-                               if (!mapping_tagged(mapping,
-                                                   PAGECACHE_TAG_WRITEBACK))
-                                       wb_inode_writeback_end(wb);
-                       }
+               folio_test_clear_writeback(folio);
+               __xa_clear_mark(&mapping->i_pages, folio_index(folio),
+                                       PAGECACHE_TAG_WRITEBACK);
+               if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
+                       struct bdi_writeback *wb = inode_to_wb(inode);
+
+                       wb_stat_mod(wb, WB_WRITEBACK, -nr);
+                       __wb_writeout_add(wb, nr);
+                       if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+                               wb_inode_writeback_end(wb);
                }
 
                if (mapping->host && !mapping_tagged(mapping,
@@ -2974,15 +2970,13 @@ bool __folio_end_writeback(struct folio *folio)
 
                xa_unlock_irqrestore(&mapping->i_pages, flags);
        } else {
-               ret = folio_test_clear_writeback(folio);
-       }
-       if (ret) {
-               lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
-               zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
-               node_stat_mod_folio(folio, NR_WRITTEN, nr);
+               folio_test_clear_writeback(folio);
        }
+
+       lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
+       zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+       node_stat_mod_folio(folio, NR_WRITTEN, nr);
        folio_memcg_unlock(folio);
-       return ret;
 }
 
 bool __folio_start_writeback(struct folio *folio, bool keep_write)
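
For context, folio_end_writeback() is typically the last step of a filesystem's
write completion path, often running from interrupt context (as the new
kernel-doc above notes).  A minimal sketch of such a caller, with a generic
name (my_write_end_io is hypothetical; the bio helpers are real):

	static void my_write_end_io(struct bio *bio)
	{
		struct folio_iter fi;

		/* Writeback was started on each folio before submission. */
		bio_for_each_folio_all(fi, bio) {
			if (bio->bi_status)
				mapping_set_error(fi.folio->mapping, -EIO);
			folio_end_writeback(fi.folio);
		}
		bio_put(bio);
	}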