mm: vmalloc: move vmap_init_free_space() down in vmalloc.c
authorUladzislau Rezki (Sony) <urezki@gmail.com>
Tue, 2 Jan 2024 18:46:25 +0000 (19:46 +0100)
committerAndrew Morton <akpm@linux-foundation.org>
Sat, 24 Feb 2024 01:48:18 +0000 (17:48 -0800)
vmap_init_free_space() is a function that sets up the vmap space and is
considered part of the initialization phase.  Since the main entry point,
vmalloc_init(), has been moved down in vmalloc.c, it makes sense to follow
the pattern.

There is no functional change as a result of this patch.

Link: https://lkml.kernel.org/r/20240102184633.748113-4-urezki@gmail.com
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
Cc: Kazuhito Hagio <k-hagio-ab@nec.com>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sony.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmalloc.c

index 10f289e865122a431cb878be394c5121f1cb4b28..06bd843d18ae999f5c38078ae2994d3a1ecf6045 100644 (file)
@@ -2512,47 +2512,6 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
        kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
 }
 
-static void vmap_init_free_space(void)
-{
-       unsigned long vmap_start = 1;
-       const unsigned long vmap_end = ULONG_MAX;
-       struct vmap_area *busy, *free;
-
-       /*
-        *     B     F     B     B     B     F
-        * -|-----|.....|-----|-----|-----|.....|-
-        *  |           The KVA space           |
-        *  |<--------------------------------->|
-        */
-       list_for_each_entry(busy, &vmap_area_list, list) {
-               if (busy->va_start - vmap_start > 0) {
-                       free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
-                       if (!WARN_ON_ONCE(!free)) {
-                               free->va_start = vmap_start;
-                               free->va_end = busy->va_start;
-
-                               insert_vmap_area_augment(free, NULL,
-                                       &free_vmap_area_root,
-                                               &free_vmap_area_list);
-                       }
-               }
-
-               vmap_start = busy->va_end;
-       }
-
-       if (vmap_end - vmap_start > 0) {
-               free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
-               if (!WARN_ON_ONCE(!free)) {
-                       free->va_start = vmap_start;
-                       free->va_end = vmap_end;
-
-                       insert_vmap_area_augment(free, NULL,
-                               &free_vmap_area_root,
-                                       &free_vmap_area_list);
-               }
-       }
-}
-
 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
        struct vmap_area *va, unsigned long flags, const void *caller)
 {
@@ -4465,6 +4424,47 @@ module_init(proc_vmalloc_init);
 
 #endif
 
+static void vmap_init_free_space(void)
+{
+       unsigned long vmap_start = 1;
+       const unsigned long vmap_end = ULONG_MAX;
+       struct vmap_area *busy, *free;
+
+       /*
+        *     B     F     B     B     B     F
+        * -|-----|.....|-----|-----|-----|.....|-
+        *  |           The KVA space           |
+        *  |<--------------------------------->|
+        */
+       list_for_each_entry(busy, &vmap_area_list, list) {
+               if (busy->va_start - vmap_start > 0) {
+                       free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
+                       if (!WARN_ON_ONCE(!free)) {
+                               free->va_start = vmap_start;
+                               free->va_end = busy->va_start;
+
+                               insert_vmap_area_augment(free, NULL,
+                                       &free_vmap_area_root,
+                                               &free_vmap_area_list);
+                       }
+               }
+
+               vmap_start = busy->va_end;
+       }
+
+       if (vmap_end - vmap_start > 0) {
+               free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
+               if (!WARN_ON_ONCE(!free)) {
+                       free->va_start = vmap_start;
+                       free->va_end = vmap_end;
+
+                       insert_vmap_area_augment(free, NULL,
+                               &free_vmap_area_root,
+                                       &free_vmap_area_list);
+               }
+       }
+}
+
 void __init vmalloc_init(void)
 {
        struct vmap_area *va;