mm/vmalloc: check free space in vmap_block lockless
authorThomas Gleixner <tglx@linutronix.de>
Thu, 25 May 2023 12:57:07 +0000 (14:57 +0200)
committerAndrew Morton <akpm@linux-foundation.org>
Fri, 9 Jun 2023 23:25:41 +0000 (16:25 -0700)
vb_alloc() unconditionally locks a vmap_block on the free list to check
the free space.

This can be done locklessly because vmap_block::free never increases; it is
only decreased on allocations.

Check the free space locklessly and, only if that check succeeds, recheck
under the lock.

Link: https://lkml.kernel.org/r/20230525124504.750481992@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmalloc.c

index ad9a1d9e314ffb8c1c4e4d121ac9361043e8ec88..679112e2ffd28f85561e8b3de5a4a3547375024a 100644 (file)
@@ -2168,6 +2168,9 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
        list_for_each_entry_rcu(vb, &vbq->free, free_list) {
                unsigned long pages_off;
 
+               if (READ_ONCE(vb->free) < (1UL << order))
+                       continue;
+
                spin_lock(&vb->lock);
                if (vb->free < (1UL << order)) {
                        spin_unlock(&vb->lock);
@@ -2176,7 +2179,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 
                pages_off = VMAP_BBMAP_BITS - vb->free;
                vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
-               vb->free -= 1UL << order;
+               WRITE_ONCE(vb->free, vb->free - (1UL << order));
                bitmap_set(vb->used_map, pages_off, (1UL << order));
                if (vb->free == 0) {
                        spin_lock(&vbq->lock);