mm/mmap: prevent pagefault handler from racing with mmu_notifier registration
author    Suren Baghdasaryan <surenb@google.com>
          Mon, 27 Feb 2023 17:36:20 +0000 (09:36 -0800)
committer Andrew Morton <akpm@linux-foundation.org>
          Thu, 6 Apr 2023 03:02:59 +0000 (20:02 -0700)
Page fault handlers might need to fire MMU notifications while a new
notifier is being registered.  Modify mm_take_all_locks() to write-lock
all VMAs, preventing this race with page fault handlers that would
otherwise proceed under only their per-VMA locks.  VMAs are write-locked
before the i_mmap_rwsem and anon_vma locks to keep the same locking
order as in the page fault handlers.
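For context, here is a condensed sketch of the per-VMA lock helpers this
patch builds on, paraphrased from the same patch series (exact field and
helper names differ across kernel versions; e.g. later kernels use
vma->vm_lock->lock rather than vma->lock).  It shows why write-locking
every VMA in mm_take_all_locks() excludes page fault handlers that hold
only a per-VMA read lock:

	/*
	 * Sketch only, not the exact upstream code.  Write-locking tags the
	 * VMA with the current mm->mm_lock_seq; every subsequent
	 * vma_start_read() then fails until vma_end_write_all() bumps the
	 * sequence number under mmap_lock.
	 */
	static inline void vma_start_write(struct vm_area_struct *vma)
	{
		int mm_lock_seq;

		mmap_assert_write_locked(vma->vm_mm);

		/* mmap_lock is held, so mm_lock_seq cannot change under us */
		mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
		if (vma->vm_lock_seq == mm_lock_seq)
			return;	/* already write-locked in this cycle */

		/* wait for existing readers (page faults) to drain, then tag */
		down_write(&vma->lock);
		vma->vm_lock_seq = mm_lock_seq;
		up_write(&vma->lock);
	}

	static inline bool vma_start_read(struct vm_area_struct *vma)
	{
		/* fails while the VMA is tagged as write-locked */
		if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))
			return false;

		if (unlikely(!down_read_trylock(&vma->lock)))
			return false;

		/* recheck: the tag may have been set while we acquired */
		if (unlikely(vma->vm_lock_seq ==
			     READ_ONCE(vma->vm_mm->mm_lock_seq))) {
			up_read(&vma->lock);
			return false;
		}
		return true;
	}

A fault handler that fails vma_start_read() falls back to taking
mmap_lock, so after mm_take_all_locks() returns, no fault can proceed on
any VMA of this mm until mm_drop_all_locks() runs.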

Link: https://lkml.kernel.org/r/20230227173632.3292573-22-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mmap.c

index 58704ca5acd2c00bac63c3309afb06f7ac13e322..18aed0ea6bd317e1906791359539cc894be8d1a8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3494,6 +3494,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * of mm/rmap.c:
  *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
  *     hugetlb mapping);
+ *   - all vmas marked locked
  *   - all i_mmap_rwsem locks;
  *   - all anon_vma->rwseml
  *
@@ -3516,6 +3517,13 @@ int mm_take_all_locks(struct mm_struct *mm)
 
        mutex_lock(&mm_all_locks_mutex);
 
+       mas_for_each(&mas, vma, ULONG_MAX) {
+               if (signal_pending(current))
+                       goto out_unlock;
+               vma_start_write(vma);
+       }
+
+       mas_set(&mas, 0);
        mas_for_each(&mas, vma, ULONG_MAX) {
                if (signal_pending(current))
                        goto out_unlock;
@@ -3605,6 +3613,7 @@ void mm_drop_all_locks(struct mm_struct *mm)
                if (vma->vm_file && vma->vm_file->f_mapping)
                        vm_unlock_mapping(vma->vm_file->f_mapping);
        }
+       vma_end_write_all(mm);
 
        mutex_unlock(&mm_all_locks_mutex);
 }
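
For reference, the consumer this protects is mmu_notifier registration,
which takes all of these locks around publishing a new notifier.  A
condensed, hypothetical sketch of that flow, based on mm/mmu_notifier.c's
__mmu_notifier_register() with error handling and list manipulation
elided:

	/* Sketch only; not the exact upstream registration code. */
	int __mmu_notifier_register(struct mmu_notifier *subscription,
				    struct mm_struct *mm)
	{
		int ret;

		mmap_assert_write_locked(mm);

		/*
		 * With this patch, mm_take_all_locks() also write-locks
		 * every VMA, so no page fault handler can run (and miss
		 * the new notifier) while it is being published.
		 */
		ret = mm_take_all_locks(mm);
		if (ret)
			return ret;	/* interrupted by a signal */

		/* ... publish subscription on mm's notifier list ... */

		mm_drop_all_locks(mm);	/* includes vma_end_write_all(mm) */
		return 0;
	}

Note that vma_end_write_all() releases all the per-VMA write locks in one
step by incrementing mm->mm_lock_seq, which is why mm_drop_all_locks()
needs only the single call added by this patch rather than a second pass
over the VMA tree.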