memcg: convert mem_cgroup_swap_full() to take a folio
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Fri, 2 Sep 2022 19:46:43 +0000 (20:46 +0100)
Committer:  Andrew Morton <akpm@linux-foundation.org>
CommitDate: Mon, 3 Oct 2022 21:02:53 +0000 (14:02 -0700)
All callers now have a folio, so convert the function to take a folio.
Saves a couple of calls to compound_head().
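
As an illustration of the caller-side change (taken from the
should_try_to_free_swap() call site in the mm/memory.c hunk below), the
call goes from passing the head page:

	mem_cgroup_swap_full(&folio->page);	/* old: page-based interface */

to passing the folio directly:

	mem_cgroup_swap_full(folio);		/* new: folio-based interface */

Inside the function, folio_test_locked() and folio_memcg() replace
PageLocked() and page_memcg(), which is where the saved compound_head()
calls mentioned above come from.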

Link: https://lkml.kernel.org/r/20220902194653.1739778-48-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/swap.h
mm/memcontrol.c
mm/memory.c
mm/swapfile.c
mm/vmscan.c

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 42cbef554de68e19b1e68484b39a7e218f1cd36b..d8bd6401c3e7dad86bc7b46e06d6fe2353314ae3 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -692,7 +692,7 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_p
 }
 
 extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
-extern bool mem_cgroup_swap_full(struct page *page);
+extern bool mem_cgroup_swap_full(struct folio *folio);
 #else
 static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
 {
@@ -714,7 +714,7 @@ static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
        return get_nr_swap_pages();
 }
 
-static inline bool mem_cgroup_swap_full(struct page *page)
+static inline bool mem_cgroup_swap_full(struct folio *folio)
 {
        return vm_swap_full();
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9863fb5889729bd4e1e8da36758d0a212a6b2171..632402001bca16ba8ca93110faeceec150c69b6a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7406,18 +7406,18 @@ long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
        return nr_swap_pages;
 }
 
-bool mem_cgroup_swap_full(struct page *page)
+bool mem_cgroup_swap_full(struct folio *folio)
 {
        struct mem_cgroup *memcg;
 
-       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
        if (vm_swap_full())
                return true;
        if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return false;
 
-       memcg = page_memcg(page);
+       memcg = folio_memcg(folio);
        if (!memcg)
                return false;
 
diff --git a/mm/memory.c b/mm/memory.c
index b8e4dae18ac15fb7207ad5dc94e0ee71f9de83e6..2f1a6da7f1e6582c9968945293be16d2620205b7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3647,7 +3647,7 @@ static inline bool should_try_to_free_swap(struct folio *folio,
 {
        if (!folio_test_swapcache(folio))
                return false;
-       if (mem_cgroup_swap_full(&folio->page) || (vma->vm_flags & VM_LOCKED) ||
+       if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
            folio_test_mlocked(folio))
                return true;
        /*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 3820b5ab64d94b489ac24cf379b4de8c877b5a0a..4efcfe34e45b95cf47ceb9331e5772548103e360 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -148,7 +148,7 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
        if (folio_trylock(folio)) {
                if ((flags & TTRS_ANYWAY) ||
                    ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
-                   ((flags & TTRS_FULL) && mem_cgroup_swap_full(&folio->page)))
+                   ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)))
                        ret = folio_free_swap(folio);
                folio_unlock(folio);
        }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1707e3bfcfe42c45ec81ebbf07095411fb6a9dd5..c5a4bff11da695533f1e50ba561b5e5d472121d7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2047,8 +2047,7 @@ activate_locked_split:
 activate_locked:
                /* Not a candidate for swapping, so reclaim swap space. */
                if (folio_test_swapcache(folio) &&
-                   (mem_cgroup_swap_full(&folio->page) ||
-                    folio_test_mlocked(folio)))
+                   (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio)))
                        folio_free_swap(folio);
                VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
                if (!folio_test_mlocked(folio)) {