mm/vmscan: convert reclaim_clean_pages_from_list() to folios
author Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 17 Jun 2022 15:42:44 +0000 (16:42 +0100)
committer akpm <akpm@linux-foundation.org>
Mon, 4 Jul 2022 01:08:44 +0000 (18:08 -0700)
Patch series "Convert much of vmscan to folios"

vmscan always operates on folios since it puts the pages on the LRU list.
Switching all of these functions from pages to folios saves 1483 bytes of
text by removing all the baggage around calling compound_head() and
similar functions.

This patch (of 5):

This is a straightforward conversion which removes several hidden calls
to compound_head(), saving 330 bytes of kernel text.
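To illustrate the kind of baggage being removed, a simplified sketch (not
the exact macro expansions generated in page-flags.h; PageDirty_sketch is a
hypothetical name used only for illustration):

	/*
	 * Sketch: a page-based flag test such as PageDirty() is roughly
	 * equivalent to this -- page_folio() resolves the head page,
	 * i.e. a hidden compound_head() on every call.
	 */
	static __always_inline bool PageDirty_sketch(struct page *page)
	{
		return folio_test_dirty(page_folio(page));
	}

	/*
	 * A caller that already holds the folio can test the flag
	 * directly, with no conversion step.
	 */
	if (folio_test_dirty(folio))
		/* ... */;

Once reclaim_clean_pages_from_list() walks folios directly, each such
conversion drops out of the loop body, which is where the text savings
quoted above come from.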

Link: https://lkml.kernel.org/r/20220617154248.700416-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20220617154248.700416-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/page-flags.h
mm/vmscan.c

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e66f7aa3191df471e7b1759cb91fc19001e8a380..f32aade2a6e0af816e838f820596f19918ecdcbb 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -670,6 +670,12 @@ static __always_inline bool PageAnon(struct page *page)
        return folio_test_anon(page_folio(page));
 }
 
+static __always_inline bool __folio_test_movable(const struct folio *folio)
+{
+       return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
+                       PAGE_MAPPING_MOVABLE;
+}
+
 static __always_inline int __PageMovable(struct page *page)
 {
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 97ac6c6c026dcd9d28650dcad424719cd83a2f5d..2ecca45672e20349cb73da2bfd491f7e81bcbd7e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2041,7 +2041,7 @@ keep:
 }
 
 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
-                                           struct list_head *page_list)
+                                           struct list_head *folio_list)
 {
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
@@ -2049,16 +2049,16 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
        };
        struct reclaim_stat stat;
        unsigned int nr_reclaimed;
-       struct page *page, *next;
-       LIST_HEAD(clean_pages);
+       struct folio *folio, *next;
+       LIST_HEAD(clean_folios);
        unsigned int noreclaim_flag;
 
-       list_for_each_entry_safe(page, next, page_list, lru) {
-               if (!PageHuge(page) && page_is_file_lru(page) &&
-                   !PageDirty(page) && !__PageMovable(page) &&
-                   !PageUnevictable(page)) {
-                       ClearPageActive(page);
-                       list_move(&page->lru, &clean_pages);
+       list_for_each_entry_safe(folio, next, folio_list, lru) {
+               if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
+                   !folio_test_dirty(folio) && !__folio_test_movable(folio) &&
+                   !folio_test_unevictable(folio)) {
+                       folio_clear_active(folio);
+                       list_move(&folio->lru, &clean_folios);
                }
        }
 
@@ -2069,11 +2069,11 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
         * change in the future.
         */
        noreclaim_flag = memalloc_noreclaim_save();
-       nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
+       nr_reclaimed = shrink_page_list(&clean_folios, zone->zone_pgdat, &sc,
                                        &stat, true);
        memalloc_noreclaim_restore(noreclaim_flag);
 
-       list_splice(&clean_pages, page_list);
+       list_splice(&clean_folios, folio_list);
        mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
                            -(long)nr_reclaimed);
        /*