slab: allocate frozen pages
author Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 25 Nov 2024 21:01:47 +0000 (21:01 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 14 Jan 2025 06:40:34 +0000 (22:40 -0800)
Since slab does not use the page refcount, it can allocate and free frozen
pages, saving one atomic operation per free.

Link: https://lkml.kernel.org/r/20241125210149.2976098-16-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/slub.c

index c2151c9fee228d121a9cbcc220c3ae054769dacf..cef25d9a476a8f2a92292ad319e9ff177cc29903 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2420,9 +2420,9 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
        unsigned int order = oo_order(oo);
 
        if (node == NUMA_NO_NODE)
-               folio = (struct folio *)alloc_pages(flags, order);
+               folio = (struct folio *)alloc_frozen_pages(flags, order);
        else
-               folio = (struct folio *)__alloc_pages_node(node, flags, order);
+               folio = (struct folio *)__alloc_frozen_pages(flags, order, node, NULL);
 
        if (!folio)
                return NULL;
@@ -2656,7 +2656,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
        __folio_clear_slab(folio);
        mm_account_reclaimed_pages(pages);
        unaccount_slab(slab, order, s);
-       __free_pages(&folio->page, order);
+       free_frozen_pages(&folio->page, order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)