mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()
author	Uladzislau Rezki (Sony) <urezki@gmail.com>
Sun, 31 Aug 2025 12:10:58 +0000 (14:10 +0200)
committer	Andrew Morton <akpm@linux-foundation.org>
Tue, 9 Sep 2025 06:45:11 +0000 (23:45 -0700)
kasan_populate_vmalloc() and its helpers ignore the caller's gfp_mask and
always allocate memory using the hardcoded GFP_KERNEL flag.  This makes
them inconsistent with vmalloc(), which was recently extended to support
GFP_NOFS and GFP_NOIO allocations.
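
The mismatch, condensed from the pre-patch mm/kasan/shadow.c (see the
diff below):

	void *p = __vmalloc(size, GFP_NOFS);	/* caller restricts reclaim */

	/* ...but shadow population always allocated with GFP_KERNEL: */
	data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);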

Page table allocations performed during shadow population also ignore the
external gfp_mask.  To preserve the intended semantics of GFP_NOFS and
GFP_NOIO, wrap the apply_to_page_range() calls in the appropriate
memalloc scope.
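
For reference, the scope API pattern in question, sketched here for the
GFP_NOFS case (the GFP_NOIO case uses memalloc_noio_save() and
memalloc_noio_restore() instead):

	unsigned int flags;

	flags = memalloc_nofs_save();
	/*
	 * Every allocation in this scope, including the page table
	 * allocations performed by apply_to_page_range(), behaves as
	 * if __GFP_FS were cleared.
	 */
	ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
				  kasan_populate_vmalloc_pte, &data);
	memalloc_nofs_restore(flags);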

xfs calls vmalloc() with GFP_NOFS, so this bug could lead to a
deadlock: a GFP_KERNEL shadow allocation can enter direct reclaim and
recurse into the filesystem while the caller still holds filesystem
locks.

The problem was reported here:
https://lkml.kernel.org/r/686ea951.050a0220.385921.0016.GAE@google.com

This patch:
 - Extends kasan_populate_vmalloc() and helpers to take gfp_mask;
 - Passes gfp_mask down to alloc_pages_bulk() and __get_free_page();
 - Enforces GFP_NOFS/NOIO semantics with memalloc_*_save()/restore()
   around apply_to_page_range();
 - Updates vmalloc.c and percpu allocator call sites accordingly (one
   such call site is sketched below).
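
With the extended signature, a call site just forwards the caller's
reclaim flags, as alloc_vmap_area() now does (condensed from the diff
below):

	/* Only reclaim behaviour flags are relevant. */
	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
	...
	ret = kasan_populate_vmalloc(addr, size, gfp_mask);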

Link: https://lkml.kernel.org/r/20250831121058.92971-1-urezki@gmail.com
Fixes: 451769ebb7e7 ("mm/vmalloc: alloc GFP_NO{FS,IO} for vmalloc")
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reported-by: syzbot+3470c9ffee63e4abafeb@syzkaller.appspotmail.com
Reviewed-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/kasan.h
mm/kasan/shadow.c
mm/vmalloc.c

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 890011071f2b142b77ecf7eb0d08dd17546a2dbf..fe5ce9215821db13b1d4a941659ba30a0420d9d0 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -562,7 +562,7 @@ static inline void kasan_init_hw_tags(void) { }
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 
 void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
 void kasan_release_vmalloc(unsigned long start, unsigned long end,
                           unsigned long free_region_start,
                           unsigned long free_region_end,
@@ -574,7 +574,7 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
                                                       unsigned long size)
 { }
 static inline int kasan_populate_vmalloc(unsigned long start,
-                                       unsigned long size)
+                                       unsigned long size, gfp_t gfp_mask)
 {
        return 0;
 }
@@ -610,7 +610,7 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
 static inline void kasan_populate_early_vm_area_shadow(void *start,
                                                       unsigned long size) { }
 static inline int kasan_populate_vmalloc(unsigned long start,
-                                       unsigned long size)
+                                       unsigned long size, gfp_t gfp_mask)
 {
        return 0;
 }
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index e2ceebf737ef7b281d3b96862ae968d42b4bd90d..11d472a5c4e8d0390fa282e76e3d8e57666ef9c6 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -336,13 +336,13 @@ static void ___free_pages_bulk(struct page **pages, int nr_pages)
        }
 }
 
-static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
+static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
 {
        unsigned long nr_populated, nr_total = nr_pages;
        struct page **page_array = pages;
 
        while (nr_pages) {
-               nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
+               nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
                if (!nr_populated) {
                        ___free_pages_bulk(page_array, nr_total - nr_pages);
                        return -ENOMEM;
@@ -354,25 +354,42 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
        return 0;
 }
 
-static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
+static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
 {
        unsigned long nr_pages, nr_total = PFN_UP(end - start);
        struct vmalloc_populate_data data;
+       unsigned int flags;
        int ret = 0;
 
-       data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+       data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
        if (!data.pages)
                return -ENOMEM;
 
        while (nr_total) {
                nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
-               ret = ___alloc_pages_bulk(data.pages, nr_pages);
+               ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
                if (ret)
                        break;
 
                data.start = start;
+
+               /*
+                * Page table allocations ignore the external gfp mask;
+                * enforce it via the memalloc scope API.
+                */
+               if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+                       flags = memalloc_nofs_save();
+               else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+                       flags = memalloc_noio_save();
+
                ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
                                          kasan_populate_vmalloc_pte, &data);
+
+               if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+                       memalloc_nofs_restore(flags);
+               else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+                       memalloc_noio_restore(flags);
+
                ___free_pages_bulk(data.pages, nr_pages);
                if (ret)
                        break;
@@ -386,7 +403,7 @@ static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
        return ret;
 }
 
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
 {
        unsigned long shadow_start, shadow_end;
        int ret;
@@ -415,7 +432,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
        shadow_start = PAGE_ALIGN_DOWN(shadow_start);
        shadow_end = PAGE_ALIGN(shadow_end);
 
-       ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
+       ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
        if (ret)
                return ret;
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6dbcdceecae13484c764e8fab3a86769a0a7064e..5edd536ba9d2a540491884b6def1728bbe4dc6df 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2026,6 +2026,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
        if (unlikely(!vmap_initialized))
                return ERR_PTR(-EBUSY);
 
+       /* Only reclaim behaviour flags are relevant. */
+       gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
        might_sleep();
 
        /*
@@ -2038,8 +2040,6 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
         */
        va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
        if (!va) {
-               gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
-
                va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
                if (unlikely(!va))
                        return ERR_PTR(-ENOMEM);
@@ -2089,7 +2089,7 @@ retry:
        BUG_ON(va->va_start < vstart);
        BUG_ON(va->va_end > vend);
 
-       ret = kasan_populate_vmalloc(addr, size);
+       ret = kasan_populate_vmalloc(addr, size, gfp_mask);
        if (ret) {
                free_vmap_area(va);
                return ERR_PTR(ret);
@@ -4826,7 +4826,7 @@ retry:
 
        /* populate the kasan shadow space */
        for (area = 0; area < nr_vms; area++) {
-               if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
+               if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
                        goto err_free_shadow;
        }