slab: Mark large folios for debugging purposes
author Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 3 Mar 2025 17:28:05 +0000 (17:28 +0000)
committer Vlastimil Babka <vbabka@suse.cz>
Tue, 4 Mar 2025 07:57:48 +0000 (08:57 +0100)
If a user calls p = kmalloc(1024); kfree(p); kfree(p); and 'p' was the
only object in the slab, we may free the slab after the first call to
kfree().  If we do, we clear PGTY_slab and the second call to kfree()
will call free_large_kmalloc().  That will leave a trace in the logs
("object pointer: 0x%p"), but otherwise proceed to free the memory,
which is likely to corrupt the page allocator's metadata.
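
The second call reaches free_large_kmalloc() because kfree() dispatches
on the page type; a condensed sketch of that dispatch (simplified from
mm/slub.c, NULL/ZERO-pointer checks elided):

	void kfree(const void *object)
	{
		struct folio *folio = virt_to_folio(object);

		if (unlikely(!folio_test_slab(folio))) {
			/* PGTY_slab was cleared when the slab was
			 * freed, so the stale pointer lands here */
			free_large_kmalloc(folio, (void *)object);
			return;
		}
		/* ... normal slab free path ... */
	}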

Allocate a new page type for large kmalloc and mark the memory with it
while it's allocated.  That lets us detect this double-free and return
without harming any data structures.
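
The FOLIO_TYPE_OPS(large_kmalloc, large_kmalloc) line in the diff below
is what generates the accessors the fix uses; roughly (a sketch of what
the page type machinery in page-flags.h expands to, details omitted):

	bool folio_test_large_kmalloc(const struct folio *folio);
	void __folio_set_large_kmalloc(struct folio *folio);
	void __folio_clear_large_kmalloc(struct folio *folio);

The set/clear helpers are the non-atomic ("__") variants: the folio is
not yet visible to anyone else at allocation time and is on its way back
to the page allocator at free time, so no atomics are needed.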

Reported-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
include/linux/page-flags.h
mm/slub.c

index 36d283552f80e9e969a29d00fe627c453d24c2ab..df9234e5f478a897bd3bedf4a99839a95dbcb929 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -925,14 +925,15 @@ FOLIO_FLAG_FALSE(has_hwpoisoned)
 enum pagetype {
        /* 0x00-0x7f are positive numbers, ie mapcount */
        /* Reserve 0x80-0xef for mapcount overflow. */
-       PGTY_buddy      = 0xf0,
-       PGTY_offline    = 0xf1,
-       PGTY_table      = 0xf2,
-       PGTY_guard      = 0xf3,
-       PGTY_hugetlb    = 0xf4,
-       PGTY_slab       = 0xf5,
-       PGTY_zsmalloc   = 0xf6,
-       PGTY_unaccepted = 0xf7,
+       PGTY_buddy              = 0xf0,
+       PGTY_offline            = 0xf1,
+       PGTY_table              = 0xf2,
+       PGTY_guard              = 0xf3,
+       PGTY_hugetlb            = 0xf4,
+       PGTY_slab               = 0xf5,
+       PGTY_zsmalloc           = 0xf6,
+       PGTY_unaccepted         = 0xf7,
+       PGTY_large_kmalloc      = 0xf8,
 
        PGTY_mapcount_underflow = 0xff
 };
@@ -1075,6 +1076,7 @@ PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
  * Serialized with zone lock.
  */
 PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
+FOLIO_TYPE_OPS(large_kmalloc, large_kmalloc)
 
 /**
  * PageHuge - Determine if the page belongs to hugetlbfs
index d94af020b305ef369961f854153d1b092ed7f8d2..3e6ab4986f8f41cefc56a3420f73a0c5917c11b2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4255,6 +4255,7 @@ static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
                ptr = folio_address(folio);
                lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
                                      PAGE_SIZE << order);
+               __folio_set_large_kmalloc(folio);
        }
 
        ptr = kasan_kmalloc_large(ptr, size, flags);
@@ -4730,6 +4731,11 @@ static void free_large_kmalloc(struct folio *folio, void *object)
 {
        unsigned int order = folio_order(folio);
 
+       if (WARN_ON_ONCE(!folio_test_large_kmalloc(folio))) {
+               dump_page(&folio->page, "Not a kmalloc allocation");
+               return;
+       }
+
        if (WARN_ON_ONCE(order == 0))
                pr_warn_once("object pointer: 0x%p\n", object);
 
@@ -4739,6 +4745,7 @@ static void free_large_kmalloc(struct folio *folio, void *object)
 
        lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
                              -(PAGE_SIZE << order));
+       __folio_clear_large_kmalloc(folio);
        folio_put(folio);
 }
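
With the patch applied, the reported sequence is detected rather than
corrupting the page allocator; a sketch of the new control flow (same
calls as in the report, comments summarising the paths described above):

	void *p = kmalloc(1024, GFP_KERNEL);	/* served from a slab cache */

	kfree(p);	/* last object: slab freed, PGTY_slab cleared */
	kfree(p);	/* !folio_test_slab() -> free_large_kmalloc(),
			 * which now sees !folio_test_large_kmalloc(),
			 * warns, dumps the page and returns early */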