binder: use per-vma lock in page reclaiming
author Carlos Llamas <cmllamas@google.com>
Tue, 10 Dec 2024 14:31:05 +0000 (14:31 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 24 Dec 2024 08:35:23 +0000 (09:35 +0100)
Use per-vma locking in the shrinker's callback when reclaiming pages,
similar to the page installation logic. This minimizes contention with
unrelated vmas, improving performance. The mmap_lock is still acquired
as a fallback if the per-vma lock cannot be obtained.
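
For reference, the locking pattern reads roughly as follows. This is a
minimal sketch assuming a kernel with CONFIG_PER_VMA_LOCK, not the
binder code itself; the helper name reclaim_under_vma_lock() is
hypothetical:

    #include <linux/mm.h>
    #include <linux/mmap_lock.h>

    /* Hypothetical helper illustrating the per-vma-lock-first pattern */
    static bool reclaim_under_vma_lock(struct mm_struct *mm,
                                       unsigned long addr)
    {
            struct vm_area_struct *vma;
            bool mm_locked = false;

            /*
             * Fast path: take the per-vma read lock under RCU.
             * Returns NULL if no vma covers addr or it cannot be
             * locked right now.
             */
            vma = lock_vma_under_rcu(mm, addr);
            if (!vma) {
                    /* Slow path: fall back to the coarse mmap_lock */
                    if (!mmap_read_trylock(mm))
                            return false;   /* contended, retry later */
                    mm_locked = true;
                    vma = vma_lookup(mm, addr);
            }

            /* ... operate on vma; may be NULL on the slow path ... */

            if (mm_locked)
                    mmap_read_unlock(mm);
            else
                    vma_end_read(vma);      /* fast path always has a vma */
            return true;
    }

Note that vma_end_read() pairs only with a successful
lock_vma_under_rcu(): on the fallback path any vma found is protected
by the mmap_lock instead, which is what the mm_locked flag tracks.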

Cc: Suren Baghdasaryan <surenb@google.com>
Suggested-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20241210143114.661252-10-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/android/binder_alloc.c

index b2b97ff19ba2dd920149e978e7707aca095ae103..fcfaf1b899c8fba0697ff104d9de3555b4a9a89b 100644
@@ -1143,19 +1143,28 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
        struct vm_area_struct *vma;
        struct page *page_to_free;
        unsigned long page_addr;
+       int mm_locked = 0;
        size_t index;
 
        if (!mmget_not_zero(mm))
                goto err_mmget;
-       if (!mmap_read_trylock(mm))
-               goto err_mmap_read_lock_failed;
-       if (!mutex_trylock(&alloc->mutex))
-               goto err_get_alloc_mutex_failed;
 
        index = mdata->page_index;
        page_addr = alloc->vm_start + index * PAGE_SIZE;
 
-       vma = vma_lookup(mm, page_addr);
+       /* attempt per-vma lock first */
+       vma = lock_vma_under_rcu(mm, page_addr);
+       if (!vma) {
+               /* fall back to mmap_lock */
+               if (!mmap_read_trylock(mm))
+                       goto err_mmap_read_lock_failed;
+               mm_locked = 1;
+               vma = vma_lookup(mm, page_addr);
+       }
+
+       if (!mutex_trylock(&alloc->mutex))
+               goto err_get_alloc_mutex_failed;
+
        /*
         * Since a binder_alloc can only be mapped once, we ensure
         * the vma corresponds to this mapping by checking whether
@@ -1183,7 +1192,10 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
        }
 
        mutex_unlock(&alloc->mutex);
-       mmap_read_unlock(mm);
+       if (mm_locked)
+               mmap_read_unlock(mm);
+       else
+               vma_end_read(vma);
        mmput_async(mm);
        binder_free_page(page_to_free);
 
@@ -1192,7 +1204,10 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 err_invalid_vma:
        mutex_unlock(&alloc->mutex);
 err_get_alloc_mutex_failed:
-       mmap_read_unlock(mm);
+       if (mm_locked)
+               mmap_read_unlock(mm);
+       else
+               vma_end_read(vma);
 err_mmap_read_lock_failed:
        mmput_async(mm);
 err_mmget: