mm: vmalloc: support more granular vrealloc() sizing
Author:     Kees Cook <kees@kernel.org>
AuthorDate: Sat, 26 Apr 2025 00:11:07 +0000 (17:11 -0700)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Thu, 8 May 2025 06:39:41 +0000 (23:39 -0700)
Introduce struct vm_struct::requested_size so that the requested
(re)allocation size is retained separately from the allocated area size.
This lets KASAN poison exactly the span of bytes the caller requested, and
it lets vrealloc() grow the usable portion of an allocation in place
whenever the existing area is already large enough to cover the new size.

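For illustration (not part of the patch), a minimal sketch of the three
paths vrealloc() can now take. The demo function name and all sizes are
hypothetical, 4 KiB pages are assumed, and error handling is trimmed:

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* Hypothetical demo; not from the patch. Assumes 4 KiB PAGE_SIZE. */
	static int __init vrealloc_sizing_demo(void)
	{
		void *p = vmalloc(300);	/* one usable page; requested_size = 300 */

		if (!p)
			return -ENOMEM;

		/* Shrink: stays in place; KASAN poisons bytes [128, 300). */
		p = vrealloc(p, 128, GFP_KERNEL);

		/*
		 * Grow within the existing page: with this patch the same
		 * address comes back and bytes [128, 2048) are unpoisoned
		 * (and zeroed when init_on_alloc is enabled) instead of a
		 * fresh area being allocated.
		 */
		p = vrealloc(p, 2048, GFP_KERNEL);

		/* Outgrow the area: falls back to __vmalloc() plus a copy. */
		p = vrealloc(p, 2 * PAGE_SIZE, GFP_KERNEL);

		vfree(p);
		return 0;
	}
	late_initcall(vrealloc_sizing_demo);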
Link: https://lkml.kernel.org/r/20250426001105.it.679-kees@kernel.org
Fixes: 3ddc2fefe6f3 ("mm: vmalloc: implement vrealloc()")
Signed-off-by: Kees Cook <kees@kernel.org>
Reported-by: Erhard Furtner <erhard_f@mailbox.org>
Closes: https://lore.kernel.org/all/20250408192503.6149a816@outsider.home/
Reviewed-by: Danilo Krummrich <dakr@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/vmalloc.h
mm/vmalloc.c

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 31e9ffd936e39334ddaff910222d4751c18da5e7..5ca8d4dd149d4e9ef92b7796f32941bd76ecc5d1 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -61,6 +61,7 @@ struct vm_struct {
        unsigned int            nr_pages;
        phys_addr_t             phys_addr;
        const void              *caller;
+       unsigned long           requested_size;
 };
 
 struct vmap_area {
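To make the new field concrete, here is a hedged sketch of the invariant
this patch establishes, assuming `p` came from vmalloc(300), 4 KiB pages,
and a trailing guard page (i.e. no VM_NO_GUARD). The checking function is
hypothetical:

	#include <linux/bug.h>
	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* Hypothetical sanity check; the constant 300 mirrors vmalloc(300). */
	static void check_requested_size_invariant(const void *p)
	{
		struct vm_struct *vm = find_vm_area(p);

		if (WARN_ON(!vm))
			return;
		WARN_ON(vm->size != 2 * PAGE_SIZE);         /* usable page + guard page */
		WARN_ON(get_vm_area_size(vm) != PAGE_SIZE); /* guard page excluded */
		WARN_ON(vm->requested_size != 300);         /* caller's exact byte count */
	}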
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3ed720a787ecd88a3eba63549c4f66ca1b4f2b23..2d7511654831e94f71c966e74f2f7a3ae16ec456 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1940,7 +1940,7 @@ static inline void setup_vmalloc_vm(struct vm_struct *vm,
 {
        vm->flags = flags;
        vm->addr = (void *)va->va_start;
-       vm->size = va_size(va);
+       vm->size = vm->requested_size = va_size(va);
        vm->caller = caller;
        va->vm = vm;
 }
@@ -3133,6 +3133,7 @@ struct vm_struct *__get_vm_area_node(unsigned long size,
 
        area->flags = flags;
        area->caller = caller;
+       area->requested_size = requested_size;
 
        va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
        if (IS_ERR(va)) {
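For context, `requested_size` here is the function's local copy of `size`,
taken before the size is page-aligned and padded with the guard page, so the
stored value is the caller's exact byte count. A condensed paraphrase of the
surrounding flow, reconstructed from memory rather than quoted from the hunk:

	/* Condensed paraphrase of __get_vm_area_node(); an assumption, not a quote. */
	unsigned long requested_size = size;	/* caller's exact byte count */

	size = ALIGN(size, 1ul << shift);	/* round the mapping up to pages */
	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;		/* reserve the trailing guard page */

	area->requested_size = requested_size;	/* retained for KASAN and vrealloc() */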
@@ -4063,6 +4064,8 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
  */
 void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 {
+       struct vm_struct *vm = NULL;
+       size_t alloced_size = 0;
        size_t old_size = 0;
        void *n;
 
@@ -4072,15 +4075,17 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
        }
 
        if (p) {
-               struct vm_struct *vm;
-
                vm = find_vm_area(p);
                if (unlikely(!vm)) {
                        WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
                        return NULL;
                }
 
-               old_size = get_vm_area_size(vm);
+               alloced_size = get_vm_area_size(vm);
+               old_size = vm->requested_size;
+               if (WARN(alloced_size < old_size,
+                        "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
+                       return NULL;
        }
 
        /*
@@ -4088,14 +4093,27 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
         * would be a good heuristic for when to shrink the vm_area?
         */
        if (size <= old_size) {
-               /* Zero out spare memory. */
-               if (want_init_on_alloc(flags))
+               /* Zero out "freed" memory. */
+               if (want_init_on_free())
                        memset((void *)p + size, 0, old_size - size);
+               vm->requested_size = size;
                kasan_poison_vmalloc(p + size, old_size - size);
-               kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL);
                return (void *)p;
        }
 
+       /*
+        * We already have the bytes available in the allocation; use them.
+        */
+       if (size <= alloced_size) {
+               kasan_unpoison_vmalloc(p + old_size, size - old_size,
+                                      KASAN_VMALLOC_PROT_NORMAL);
+               /* Zero out "alloced" memory. */
+               if (want_init_on_alloc(flags))
+                       memset((void *)p + old_size, 0, size - old_size);
+               vm->requested_size = size;
+               return (void *)p;
+       }
+
        /* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
        n = __vmalloc_noprof(size, flags);
        if (!n)
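
One subtlety worth spelling out: shrinking now treats the tail as freed
memory, while growing treats the newly exposed tail as freshly allocated
memory, so the zeroing hooks differ between the two in-place paths. An
illustrative summary (wording ours, ranges taken from the code above):

	/*
	 * In-place paths after this patch:
	 *
	 *   shrink (size <= old_size):
	 *       bytes [size, old_size) are "freed"  -> zeroed when
	 *       init_on_free is enabled, then KASAN-poisoned
	 *
	 *   grow (size <= alloced_size):
	 *       bytes [old_size, size) are "allocated" -> KASAN-unpoisoned,
	 *       then zeroed when init_on_alloc is enabled
	 */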