mm/vmalloc: refactor __vmalloc_node_range_noprof()
authorLiu Ye <liuye@kylinos.cn>
Thu, 6 Mar 2025 07:21:31 +0000 (15:21 +0800)
committerAndrew Morton <akpm@linux-foundation.org>
Tue, 18 Mar 2025 05:06:58 +0000 (22:06 -0700)
According to the code logic, the first parameter of the sub-function
__get_vm_area_node() should be size instead of real_size.

Then in __get_vm_area_node(), the size will be aligned, so the redundant
alignment operation is deleted.

The use of the real_size variable causes code redundancy, so it is removed
to simplify the code.

The "real_" prefix is conventionally used for the adjusted (aligned) value of a
parameter, but according to the code logic this variable actually holds the
original, pre-adjustment value, so rename it to original_align.

Link: https://lkml.kernel.org/r/20250306072131.800499-1-liuye@kylinos.cn
Signed-off-by: Liu Ye <liuye@kylinos.cn>
Reviewed-by: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmalloc.c

index 61981ee1c9d2f769d4a06ab542fc84334c1b0cbd..3ed720a787ecd88a3eba63549c4f66ca1b4f2b23 100644 (file)
@@ -3771,8 +3771,7 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
        struct vm_struct *area;
        void *ret;
        kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
-       unsigned long real_size = size;
-       unsigned long real_align = align;
+       unsigned long original_align = align;
        unsigned int shift = PAGE_SHIFT;
 
        if (WARN_ON_ONCE(!size))
@@ -3781,7 +3780,7 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
        if ((size >> PAGE_SHIFT) > totalram_pages()) {
                warn_alloc(gfp_mask, NULL,
                        "vmalloc error: size %lu, exceeds total pages",
-                       real_size);
+                       size);
                return NULL;
        }
 
@@ -3798,19 +3797,18 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
                else
                        shift = arch_vmap_pte_supported_shift(size);
 
-               align = max(real_align, 1UL << shift);
-               size = ALIGN(real_size, 1UL << shift);
+               align = max(original_align, 1UL << shift);
        }
 
 again:
-       area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
+       area = __get_vm_area_node(size, align, shift, VM_ALLOC |
                                  VM_UNINITIALIZED | vm_flags, start, end, node,
                                  gfp_mask, caller);
        if (!area) {
                bool nofail = gfp_mask & __GFP_NOFAIL;
                warn_alloc(gfp_mask, NULL,
                        "vmalloc error: size %lu, vm_struct allocation failed%s",
-                       real_size, (nofail) ? ". Retrying." : "");
+                       size, (nofail) ? ". Retrying." : "");
                if (nofail) {
                        schedule_timeout_uninterruptible(1);
                        goto again;
@@ -3860,7 +3858,7 @@ again:
            (gfp_mask & __GFP_SKIP_ZERO))
                kasan_flags |= KASAN_VMALLOC_INIT;
        /* KASAN_VMALLOC_PROT_NORMAL already set if required. */
-       area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
+       area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags);
 
        /*
         * In this function, newly allocated vm_struct has VM_UNINITIALIZED
@@ -3869,17 +3867,15 @@ again:
         */
        clear_vm_uninitialized_flag(area);
 
-       size = PAGE_ALIGN(size);
        if (!(vm_flags & VM_DEFER_KMEMLEAK))
-               kmemleak_vmalloc(area, size, gfp_mask);
+               kmemleak_vmalloc(area, PAGE_ALIGN(size), gfp_mask);
 
        return area->addr;
 
 fail:
        if (shift > PAGE_SHIFT) {
                shift = PAGE_SHIFT;
-               align = real_align;
-               size = real_size;
+               align = original_align;
                goto again;
        }