struct vm_area_struct *vma;
struct page *page_to_free;
unsigned long page_addr;
+ int mm_locked = 0; /* nonzero if mmap_lock, rather than the per-vma lock, is held */
size_t index;
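+ /* skip this page if the owner's mm is already being torn down */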
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!mmap_read_trylock(mm))
- goto err_mmap_read_lock_failed;
- if (!mutex_trylock(&alloc->mutex))
- goto err_get_alloc_mutex_failed;
index = mdata->page_index;
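+ /* userspace address backed by the page under reclaim */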
page_addr = alloc->vm_start + index * PAGE_SIZE;
- vma = vma_lookup(mm, page_addr);
+ /* attempt per-vma lock first to avoid contending on the whole mmap_lock */
+ vma = lock_vma_under_rcu(mm, page_addr);
+ if (!vma) {
+ /* fall back to mmap_lock */
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
+ mm_locked = 1;
+ vma = vma_lookup(mm, page_addr);
+ }
+
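+ /* trylock only: sleeping on alloc->mutex inside the shrinker could deadlock */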
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
/*
 * Since a binder_alloc can only be mapped once, we ensure
 * the vma corresponds to this mapping by checking whether
 * the binder_alloc is fully mapped.
 */
if (vma && !binder_alloc_is_mapped(alloc))
goto err_invalid_vma;

page_to_free = alloc->pages[index];
binder_set_installed_page(alloc, index, NULL);

if (vma) {
/* evict the page from the process address space before freeing it */
zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
}
mutex_unlock(&alloc->mutex);
- mmap_read_unlock(mm);
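+ /* release whichever lock the lookup path took */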
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
mmput_async(mm);
binder_free_page(page_to_free);

return LRU_REMOVED_RETRY;
err_invalid_vma:
mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
- mmap_read_unlock(mm);
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
err_mmap_read_lock_failed:
mmput_async(mm);
err_mmget: