mm/page_alloc: move set_page_refcounted() to callers of post_alloc_hook()
author		Matthew Wilcox (Oracle) <willy@infradead.org>
		Mon, 25 Nov 2024 21:01:36 +0000 (21:01 +0000)
committer	Andrew Morton <akpm@linux-foundation.org>
		Tue, 14 Jan 2025 06:40:31 +0000 (22:40 -0800)
In preparation for allocating frozen pages, stop initialising the page
refcount in post_alloc_hook().  Every caller now calls
set_page_refcounted() itself immediately afterwards, so behaviour is
unchanged.
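
For reference, the caller pattern this moves to looks like the
following sketch; the helper name is illustrative only (not part of the
patch), while post_alloc_hook(), set_page_refcounted() and
prep_compound_page() are the functions touched in the diff below:

	/* Illustrative helper only: not added by this patch. */
	static struct page *prep_refcounted_page(struct page *page,
						 unsigned int order,
						 gfp_t gfp_flags)
	{
		post_alloc_hook(page, order, gfp_flags); /* no longer sets refcount */
		set_page_refcounted(page);               /* each caller now does this */
		if (order)
			prep_compound_page(page, order);
		return page;
	}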

Link: https://lkml.kernel.org/r/20241125210149.2976098-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/compaction.c
mm/internal.h
mm/page_alloc.c

diff --git a/mm/compaction.c b/mm/compaction.c
index a2b16b08cbbff70c1decefb357c5523d8483fbac..07bd22789f07c48acaa4a59742869d4187a0d0d1 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -83,6 +83,7 @@ static inline bool is_via_compact_memory(int order) { return false; }
 static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags)
 {
        post_alloc_hook(page, order, __GFP_MOVABLE);
+       set_page_refcounted(page);
        return page;
 }
 #define mark_allocated(...)    alloc_hooks(mark_allocated_noprof(__VA_ARGS__))
@@ -1868,6 +1869,7 @@ again:
        dst = (struct folio *)freepage;
 
        post_alloc_hook(&dst->page, order, __GFP_MOVABLE);
+       set_page_refcounted(&dst->page);
        if (order)
                prep_compound_page(&dst->page, order);
        cc->nr_freepages -= 1 << order;
diff --git a/mm/internal.h b/mm/internal.h
index b650a7cb7b4690719120227a3185866d31541d39..9c941af5bdb6e74ba36dd72059b0579fde80d3f1 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -735,8 +735,7 @@ static inline void prep_compound_tail(struct page *head, int tail_idx)
 
 extern void prep_compound_page(struct page *page, unsigned int order);
 
-extern void post_alloc_hook(struct page *page, unsigned int order,
-                                       gfp_t gfp_flags);
+void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
 extern bool free_pages_prepare(struct page *page, unsigned int order);
 
 extern int user_min_free_kbytes;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 03f2491d13d264978ee29c1d2be7a2011b2272aa..a3072047c705927764cd70fb28f31b1d34204de6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1508,7 +1508,6 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
        int i;
 
        set_page_private(page, 0);
-       set_page_refcounted(page);
 
        arch_alloc_page(page, order);
        debug_pagealloc_map_pages(page, 1 << order);
@@ -1564,6 +1563,7 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
                                                        unsigned int alloc_flags)
 {
        post_alloc_hook(page, order, gfp_flags);
+       set_page_refcounted(page);
 
        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);
@@ -6360,6 +6360,7 @@ static void split_free_pages(struct list_head *list)
                        int i;
 
                        post_alloc_hook(page, order, __GFP_MOVABLE);
+                       set_page_refcounted(page);
                        if (!order)
                                continue;
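
With the refcount initialisation lifted out of post_alloc_hook(), a
future frozen-page allocation path (the stated motivation) can simply
omit the set_page_refcounted() call; a hypothetical frozen caller
reduces to:

	post_alloc_hook(page, order, gfp_flags);
	/* no set_page_refcounted(): the page keeps a refcount of zero */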