mm/page_alloc: export free_frozen_pages() instead of free_unref_page()
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Mon, 25 Nov 2024 21:01:35 +0000 (21:01 +0000)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Tue, 14 Jan 2025 06:40:31 +0000 (22:40 -0800)
We already have the concept of "frozen pages" (e.g. page_ref_freeze()), so
let's not complicate things by also having the concept of "unref pages".
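
As background, "frozen" already has a precise meaning in mm: a page whose
refcount has been atomically taken to zero.  A condensed sketch of the
existing helper the new name aligns with (simplified from
include/linux/page_ref.h; the tracepoint hook is omitted):

	static inline int page_ref_freeze(struct page *page, int count)
	{
		/*
		 * Atomically replace an expected refcount of @count with 0.
		 * On success the page is "frozen": it holds no references
		 * until page_ref_unfreeze() restores a non-zero count.
		 */
		return likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
	}

free_frozen_pages() takes pages in exactly that state (refcount already
zero, e.g. after put_page_testzero()), so the rename keeps a single term
for a single concept.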

Link: https://lkml.kernel.org/r/20241125210149.2976098-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/internal.h
mm/page_alloc.c
mm/page_frag_cache.c
mm/swap.c

diff --git a/mm/internal.h b/mm/internal.h
index 9826f7dce6072443261e96ceb20de4e7944c73c1..b650a7cb7b4690719120227a3185866d31541d39 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -741,7 +741,7 @@ extern bool free_pages_prepare(struct page *page, unsigned int order);
 
 extern int user_min_free_kbytes;
 
-void free_unref_page(struct page *page, unsigned int order);
+void free_frozen_pages(struct page *page, unsigned int order);
 void free_unref_folios(struct folio_batch *fbatch);
 
 extern void zone_pcp_reset(struct zone *zone);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a44f9ff04b1a23c32253831d497ebf50c7565bbb..03f2491d13d264978ee29c1d2be7a2011b2272aa 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2592,9 +2592,9 @@ static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
        return high;
 }
 
-static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
-                                  struct page *page, int migratetype,
-                                  unsigned int order)
+static void free_frozen_page_commit(struct zone *zone,
+               struct per_cpu_pages *pcp, struct page *page, int migratetype,
+               unsigned int order)
 {
        int high, batch;
        int pindex;
@@ -2643,7 +2643,7 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
 /*
  * Free a pcp page
  */
-void free_unref_page(struct page *page, unsigned int order)
+void free_frozen_pages(struct page *page, unsigned int order)
 {
        unsigned long __maybe_unused UP_flags;
        struct per_cpu_pages *pcp;
@@ -2679,7 +2679,7 @@ void free_unref_page(struct page *page, unsigned int order)
        pcp_trylock_prepare(UP_flags);
        pcp = pcp_spin_trylock(zone->per_cpu_pageset);
        if (pcp) {
-               free_unref_page_commit(zone, pcp, page, migratetype, order);
+               free_frozen_page_commit(zone, pcp, page, migratetype, order);
                pcp_spin_unlock(pcp);
        } else {
                free_one_page(zone, page, pfn, order, FPI_NONE);
@@ -2743,7 +2743,7 @@ void free_unref_folios(struct folio_batch *folios)
 
                        /*
                         * Free isolated pages directly to the
-                        * allocator, see comment in free_unref_page.
+                        * allocator, see comment in free_frozen_pages.
                         */
                        if (is_migrate_isolate(migratetype)) {
                                free_one_page(zone, &folio->page, pfn,
@@ -2774,7 +2774,7 @@ void free_unref_folios(struct folio_batch *folios)
                        migratetype = MIGRATE_MOVABLE;
 
                trace_mm_page_free_batched(&folio->page);
-               free_unref_page_commit(zone, pcp, &folio->page, migratetype,
+               free_frozen_page_commit(zone, pcp, &folio->page, migratetype,
                                order);
        }
 
@@ -4837,11 +4837,11 @@ void __free_pages(struct page *page, unsigned int order)
        struct alloc_tag *tag = pgalloc_tag_get(page);
 
        if (put_page_testzero(page))
-               free_unref_page(page, order);
+               free_frozen_pages(page, order);
        else if (!head) {
                pgalloc_tag_sub_pages(tag, (1 << order) - 1);
                while (order-- > 0)
-                       free_unref_page(page + (1 << order), order);
+                       free_frozen_pages(page + (1 << order), order);
        }
 }
 EXPORT_SYMBOL(__free_pages);
diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
index 3f7a203d35c6409193df5b64adb5301193b4141b..d2423f30577e49da55f80f7ac96d7f0040463531 100644
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -86,7 +86,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
        VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 
        if (page_ref_sub_and_test(page, count))
-               free_unref_page(page, compound_order(page));
+               free_frozen_pages(page, compound_order(page));
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
@@ -138,7 +138,7 @@ refill:
                        goto refill;
 
                if (unlikely(encoded_page_decode_pfmemalloc(encoded_page))) {
-                       free_unref_page(page,
+                       free_frozen_pages(page,
                                        encoded_page_decode_order(encoded_page));
                        goto refill;
                }
@@ -166,6 +166,6 @@ void page_frag_free(void *addr)
        struct page *page = virt_to_head_page(addr);
 
        if (unlikely(put_page_testzero(page)))
-               free_unref_page(page, compound_order(page));
+               free_frozen_pages(page, compound_order(page));
 }
 EXPORT_SYMBOL(page_frag_free);
diff --git a/mm/swap.c b/mm/swap.c
index 10decd9dffa172899f27e6968e3a95405da9ecf3..3a01acfd5a89404911f94fe4577f2de990cc224d 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -109,7 +109,7 @@ void __folio_put(struct folio *folio)
        page_cache_release(folio);
        folio_unqueue_deferred_split(folio);
        mem_cgroup_uncharge(folio);
-       free_unref_page(&folio->page, folio_order(folio));
+       free_frozen_pages(&folio->page, folio_order(folio));
 }
 EXPORT_SYMBOL(__folio_put);