mm/page_alloc: add __alloc_frozen_pages()
author	Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 25 Nov 2024 21:01:45 +0000 (21:01 +0000)
committer	Andrew Morton <akpm@linux-foundation.org>
Tue, 14 Jan 2025 06:40:33 +0000 (22:40 -0800)
Defer the initialisation of the page refcount to the new __alloc_pages()
wrapper and turn the old __alloc_pages() into __alloc_frozen_pages().

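To illustrate the split, here is a rough sketch (not part of this patch) of how
an mm-internal caller might use the new interface; the example_grab_page()
helper, its parameters and the numa_node_id() choice are hypothetical, while
__alloc_frozen_pages(), free_frozen_pages() and set_page_refcounted() are the
interfaces touched by this series:

static struct page *example_grab_page(gfp_t gfp, unsigned int order)
{
	struct page *page;

	/* Returns a page whose refcount has not been initialised. */
	page = __alloc_frozen_pages(gfp, order, numa_node_id(), NULL);
	if (!page)
		return NULL;

	/* ... set the page up while nobody else can take a reference ... */

	set_page_refcounted(page);	/* what __alloc_pages() now does */
	return page;
}

A page abandoned before that last step is freed with free_frozen_pages(page,
order) rather than put_page(), since its refcount was never initialised.
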
Link: https://lkml.kernel.org/r/20241125210149.2976098-14-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/internal.h
mm/page_alloc.c

index 9c941af5bdb6e74ba36dd72059b0579fde80d3f1..b831688a71e8165bd63a2b92193e78248dd0eadf 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -740,6 +740,10 @@ extern bool free_pages_prepare(struct page *page, unsigned int order);
 
 extern int user_min_free_kbytes;
 
+struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
+               nodemask_t *);
+#define __alloc_frozen_pages(...) \
+       alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__))
 void free_frozen_pages(struct page *page, unsigned int order);
 void free_unref_folios(struct folio_batch *fbatch);
 
index df5b61592792f45f6c828eb17ccdf070b9428758..7a2853b7967dc189026ca6e335b956e6e59c5a03 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4713,8 +4713,8 @@ EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
-                                     int preferred_nid, nodemask_t *nodemask)
+struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order,
+               int preferred_nid, nodemask_t *nodemask)
 {
        struct page *page;
        unsigned int alloc_flags = ALLOC_WMARK_LOW;
@@ -4770,14 +4770,24 @@ out:
                free_frozen_pages(page, order);
                page = NULL;
        }
-       if (page)
-               set_page_refcounted(page);
 
        trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
        kmsan_alloc_page(page, order, alloc_gfp);
 
        return page;
 }
+EXPORT_SYMBOL(__alloc_frozen_pages_noprof);
+
+struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
+               int preferred_nid, nodemask_t *nodemask)
+{
+       struct page *page;
+
+       page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask);
+       if (page)
+               set_page_refcounted(page);
+       return page;
+}
 EXPORT_SYMBOL(__alloc_pages_noprof);
 
 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,