mm/swap: convert deactivate_page() to folio_deactivate()
authorVishal Moola (Oracle) <vishal.moola@gmail.com>
Wed, 21 Dec 2022 18:08:48 +0000 (10:08 -0800)
committerAndrew Morton <akpm@linux-foundation.org>
Thu, 19 Jan 2023 01:12:47 +0000 (17:12 -0800)
Deactivate_page() has already been converted to use folios; this change
converts it to take in a folio argument instead of calling page_folio().
It also renames the function to folio_deactivate(), to be more consistent
with other folio functions.

[akpm@linux-foundation.org: fix left-over comments, per Yu Zhao]
Link: https://lkml.kernel.org/r/20221221180848.20774-5-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/swap.h
mm/damon/paddr.c
mm/madvise.c
mm/page-writeback.c
mm/swap.c
mm/vmscan.c

index 93f1cebd85454ebe5468929249816c0042d50e90..87cecb8c0bdc6b16ed5305cd03c6eb756bdb6687 100644 (file)
@@ -401,7 +401,7 @@ extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_cpu_zone(struct zone *zone);
 extern void lru_add_drain_all(void);
-extern void deactivate_page(struct page *page);
+void folio_deactivate(struct folio *folio);
 void folio_mark_lazyfree(struct folio *folio);
 extern void swap_setup(void);
 
index 884c8bf18b125af5e81cb8b353169ce26fce17ae..6334c99e515227a7959604ee227eeb91537cdeac 100644 (file)
@@ -297,7 +297,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
                if (mark_accessed)
                        folio_mark_accessed(folio);
                else
-                       deactivate_page(&folio->page);
+                       folio_deactivate(folio);
                folio_put(folio);
                applied += folio_nr_pages(folio);
        }
index 575ebf0363b8e67e5dc32455dff6d52f11bfbf93..e407d335e61455b8cf4985ea7e3b110b7329206f 100644 (file)
@@ -416,7 +416,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
                                        list_add(&folio->lru, &folio_list);
                        }
                } else
-                       deactivate_page(&folio->page);
+                       folio_deactivate(folio);
 huge_unlock:
                spin_unlock(ptl);
                if (pageout)
@@ -510,7 +510,7 @@ regular_folio:
                                        list_add(&folio->lru, &folio_list);
                        }
                } else
-                       deactivate_page(&folio->page);
+                       folio_deactivate(folio);
        }
 
        arch_leave_lazy_mmu_mode();
index ad608ef2a24365e7f47e514fe841d14b750c1716..41128ea9c9973a5098966582cadff4eca1ca6599 100644 (file)
@@ -2846,11 +2846,11 @@ bool folio_mark_dirty(struct folio *folio)
 
        if (likely(mapping)) {
                /*
-                * readahead/lru_deactivate_page could remain
+                * readahead/folio_deactivate could remain
                 * PG_readahead/PG_reclaim due to race with folio_end_writeback
                 * About readahead, if the folio is written, the flags would be
                 * reset. So no problem.
-                * About lru_deactivate_page, if the folio is redirtied,
+                * About folio_deactivate, if the folio is redirtied,
                 * the flag will be reset. So no problem. but if the
                 * folio is used by readahead it will confuse readahead
                 * and make it restart the size rampup process. But it's
index 5e5eba18693031f9470fab515c223829c4218c02..e54e2a252e27e41ba1c57285149ade2083a3788e 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -733,17 +733,15 @@ void deactivate_file_folio(struct folio *folio)
 }
 
 /*
- * deactivate_page - deactivate a page
- * @page: page to deactivate
+ * folio_deactivate - deactivate a folio
+ * @folio: folio to deactivate
  *
- * deactivate_page() moves @page to the inactive list if @page was on the active
- * list and was not an unevictable page.  This is done to accelerate the reclaim
- * of @page.
+ * folio_deactivate() moves @folio to the inactive list if @folio was on the
+ * active list and was not unevictable. This is done to accelerate the
+ * reclaim of @folio.
  */
-void deactivate_page(struct page *page)
+void folio_deactivate(struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
-
        if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
            (folio_test_active(folio) || lru_gen_enabled())) {
                struct folio_batch *fbatch;
index bd6637fcd8f9b19a69045e79e73d6b0c44d9f15b..aa8c252949dad478671414f7f86fe726c59945b9 100644 (file)
@@ -1920,7 +1920,7 @@ retry:
                             !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
                                /*
                                 * Immediately reclaim when written back.
-                                * Similar in principle to deactivate_page()
+                                * Similar in principle to folio_deactivate()
                                 * except we already have the folio isolated
                                 * and know it's dirty
                                 */