mm/vmalloc: extend __alloc_vmap_area() with extra arguments
author Uladzislau Rezki (Sony) <urezki@gmail.com>
Tue, 7 Jun 2022 09:34:46 +0000 (11:34 +0200)
committer akpm <akpm@linux-foundation.org>
Mon, 4 Jul 2022 01:08:41 +0000 (18:08 -0700)
Currently __alloc_vmap_area() allocates only from the global vmap
space: the list head and rb-tree that represent the free vmap space
are therefore not passed to it as parameters but are accessed
directly from within the function.

Extend __alloc_vmap_area() and its dependent functions to take the
rb-tree root and list head as parameters, so that allocation can be
done from different trees and the interface becomes generic rather
than specific to the global free space; a hypothetical use of the
extended interface is sketched below.
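
A minimal sketch of what the extended interface allows (illustrative
only, not part of this patch; it assumes code living in mm/vmalloc.c
where the static helpers are visible, and the my_free_root /
my_free_list / alloc_from_my_pool names are made up).  A caller could
drive the allocator over a private free-space tree and list instead of
the global pair, provided it serializes access the same way the global
path does with free_vmap_area_lock:

	/* A private free-vmap pool (hypothetical names). */
	static struct rb_root my_free_root = RB_ROOT;
	static LIST_HEAD(my_free_list);

	static unsigned long alloc_from_my_pool(unsigned long size,
		unsigned long align, unsigned long vstart, unsigned long vend)
	{
		/*
		 * Same contract as the global path: a start address is
		 * returned on success, "vend" on failure.  Locking of
		 * the pool is the caller's responsibility.
		 */
		return __alloc_vmap_area(&my_free_root, &my_free_list,
					 size, align, vstart, vend);
	}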

There is no functional change as a result of this patch.

Link: https://lkml.kernel.org/r/20220607093449.3100-3-urezki@gmail.com
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sony.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2504c83814fecc1bb8890689d08ba52463017ff1..5dce7593c075feeda5a3f05dc862f228017ef6f7 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1233,15 +1233,15 @@ is_within_this_va(struct vmap_area *va, unsigned long size,
  * overhead.
  */
 static __always_inline struct vmap_area *
-find_vmap_lowest_match(unsigned long size, unsigned long align,
-       unsigned long vstart, bool adjust_search_size)
+find_vmap_lowest_match(struct rb_root *root, unsigned long size,
+       unsigned long align, unsigned long vstart, bool adjust_search_size)
 {
        struct vmap_area *va;
        struct rb_node *node;
        unsigned long length;
 
        /* Start from the root. */
-       node = free_vmap_area_root.rb_node;
+       node = root->rb_node;
 
        /* Adjust the search size for alignment overhead. */
        length = adjust_search_size ? size + align - 1 : size;
@@ -1369,8 +1369,9 @@ classify_va_fit_type(struct vmap_area *va,
 }
 
 static __always_inline int
-adjust_va_to_fit_type(struct vmap_area *va,
-       unsigned long nva_start_addr, unsigned long size)
+adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
+                     struct vmap_area *va, unsigned long nva_start_addr,
+                     unsigned long size)
 {
        struct vmap_area *lva = NULL;
        enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
@@ -1383,7 +1384,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
                 * V      NVA      V
                 * |---------------|
                 */
-               unlink_va_augment(va, &free_vmap_area_root);
+               unlink_va_augment(va, root);
                kmem_cache_free(vmap_area_cachep, va);
        } else if (type == LE_FIT_TYPE) {
                /*
@@ -1461,8 +1462,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
                augment_tree_propagate_from(va);
 
                if (lva)        /* type == NE_FIT_TYPE */
-                       insert_vmap_area_augment(lva, &va->rb_node,
-                               &free_vmap_area_root, &free_vmap_area_list);
+                       insert_vmap_area_augment(lva, &va->rb_node, root, head);
        }
 
        return 0;
@@ -1473,7 +1473,8 @@ adjust_va_to_fit_type(struct vmap_area *va,
  * Otherwise a vend is returned that indicates failure.
  */
 static __always_inline unsigned long
-__alloc_vmap_area(unsigned long size, unsigned long align,
+__alloc_vmap_area(struct rb_root *root, struct list_head *head,
+       unsigned long size, unsigned long align,
        unsigned long vstart, unsigned long vend)
 {
        bool adjust_search_size = true;
@@ -1493,7 +1494,7 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
        if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
                adjust_search_size = false;
 
-       va = find_vmap_lowest_match(size, align, vstart, adjust_search_size);
+       va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
        if (unlikely(!va))
                return vend;
 
@@ -1507,7 +1508,7 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
                return vend;
 
        /* Update the free vmap_area. */
-       ret = adjust_va_to_fit_type(va, nva_start_addr, size);
+       ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
        if (WARN_ON_ONCE(ret))
                return vend;
 
@@ -1598,7 +1599,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 
 retry:
        preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
-       addr = __alloc_vmap_area(size, align, vstart, vend);
+       addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
+               size, align, vstart, vend);
        spin_unlock(&free_vmap_area_lock);
 
        /*
@@ -3874,7 +3876,9 @@ retry:
                        /* It is a BUG(), but trigger recovery instead. */
                        goto recovery;
 
-               ret = adjust_va_to_fit_type(va, start, size);
+               ret = adjust_va_to_fit_type(&free_vmap_area_root,
+                                           &free_vmap_area_list,
+                                           va, start, size);
                if (WARN_ON_ONCE(unlikely(ret)))
                        /* It is a BUG(), but trigger recovery instead. */
                        goto recovery;