mm/page_alloc: move set_page_refcounted() to callers of prep_new_page()
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 25 Nov 2024 21:01:37 +0000 (21:01 +0000)
Committer: Andrew Morton <akpm@linux-foundation.org>
Tue, 14 Jan 2025 06:40:31 +0000 (22:40 -0800)
In preparation for allocating frozen pages, stop initialising the page
refcount in prep_new_page().

Link: https://lkml.kernel.org/r/20241125210149.2976098-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/page_alloc.c

index a3072047c705927764cd70fb28f31b1d34204de6..08cc1e0bd95030afb008e0546611a3fa88ef3a20 100644 (file)
@@ -1563,7 +1563,6 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
                                                        unsigned int alloc_flags)
 {
        post_alloc_hook(page, order, gfp_flags);
-       set_page_refcounted(page);
 
        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);
@@ -3474,6 +3473,7 @@ try_this_zone:
                                gfp_mask, alloc_flags, ac->migratetype);
                if (page) {
                        prep_new_page(page, order, gfp_mask, alloc_flags);
+                       set_page_refcounted(page);
 
                        /*
                         * If this is a high-order atomic allocation then check
@@ -3698,8 +3698,10 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        count_vm_event(COMPACTSTALL);
 
        /* Prep a captured page if available */
-       if (page)
+       if (page) {
                prep_new_page(page, order, gfp_mask, alloc_flags);
+               set_page_refcounted(page);
+       }
 
        /* Try get a page from the freelist if available */
        if (!page)
@@ -4678,6 +4680,7 @@ retry_this_zone:
                nr_account++;
 
                prep_new_page(page, 0, gfp, 0);
+               set_page_refcounted(page);
                if (page_list)
                        list_add(&page->lru, page_list);
                else
@@ -6500,6 +6503,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
 
                check_new_pages(head, order);
                prep_new_page(head, order, gfp_mask, 0);
+               set_page_refcounted(head);
        } else {
                ret = -EINVAL;
                WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n",