mm: introduce vma detached flag
author	Suren Baghdasaryan <surenb@google.com>
Mon, 27 Feb 2023 17:36:21 +0000 (09:36 -0800)
committer	Andrew Morton <akpm@linux-foundation.org>
Thu, 6 Apr 2023 03:02:59 +0000 (20:02 -0700)
The per-VMA locking mechanism will search for a VMA under RCU protection
and then, after locking it, has to ensure the VMA was not removed from the
VMA tree after it was found.  To make this check efficient, introduce a
vma->detached flag to mark VMAs which have been removed from the VMA tree.
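
For illustration only (not part of this patch), a minimal sketch of how a
lock-free lookup might consume the flag: find the VMA under RCU, take the
per-VMA read lock added earlier in this series (vma_start_read() /
vma_end_read()), then treat vma->detached as "this VMA was removed from
mm->mm_mt while we were looking at it".  The in-tree consumer,
lock_vma_under_rcu(), arrives in a later patch of the series and performs
additional revalidation that this sketch omits.

	/* Illustrative sketch: validate an RCU-found VMA via the detached flag. */
	static struct vm_area_struct *sketch_lock_vma(struct mm_struct *mm,
						      unsigned long addr)
	{
		MA_STATE(mas, &mm->mm_mt, addr, addr);
		struct vm_area_struct *vma;

		rcu_read_lock();
		vma = mas_walk(&mas);			/* lookup without mmap_lock */
		if (!vma || !vma_start_read(vma))	/* per-VMA read lock (earlier patch) */
			goto fail;
		if (vma->detached) {			/* removed from the tree after lookup */
			vma_end_read(vma);
			goto fail;
		}
		rcu_read_unlock();
		return vma;				/* caller releases with vma_end_read() */
	fail:
		rcu_read_unlock();
		return NULL;
	}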

Link: https://lkml.kernel.org/r/20230227173632.3292573-23-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
include/linux/mm_types.h
mm/mmap.c

index d6a2abc51e3d4d92d318c26d0aee63d57f874ba5..d0f289bfef015f53d4733188bde899e87c629e56 100644 (file)
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -711,6 +711,14 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma)
        VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
 }
 
+static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
+{
+       /* When detaching vma should be write-locked */
+       if (detached)
+               vma_assert_write_locked(vma);
+       vma->detached = detached;
+}
+
 #else /* CONFIG_PER_VMA_LOCK */
 
 static inline void vma_init_lock(struct vm_area_struct *vma) {}
@@ -721,6 +729,8 @@ static inline void vma_start_write(struct vm_area_struct *vma) {}
 static inline bool vma_try_start_write(struct vm_area_struct *vma)
                { return true; }
 static inline void vma_assert_write_locked(struct vm_area_struct *vma) {}
+static inline void vma_mark_detached(struct vm_area_struct *vma,
+                                    bool detached) {}
 
 #endif /* CONFIG_PER_VMA_LOCK */
 
@@ -732,6 +742,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
        vma->vm_mm = mm;
        vma->vm_ops = &dummy_vm_ops;
        INIT_LIST_HEAD(&vma->anon_vma_chain);
+       vma_mark_detached(vma, false);
        vma_init_lock(vma);
 }
 
index 843a45893991774e1233128a46ca2a7b2345338f..3248ae45cb2e075b0b48711dc9d35a5c3767272b 100644 (file)
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -506,6 +506,9 @@ struct vm_area_struct {
 #ifdef CONFIG_PER_VMA_LOCK
        int vm_lock_seq;
        struct rw_semaphore lock;
+
+       /* Flag to indicate areas detached from the mm->mm_mt tree */
+       bool detached;
 #endif
 
        /*
index 18aed0ea6bd317e1906791359539cc894be8d1a8..b42f58591b9a53f61949a0fb05bd81cadd99b5e8 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -599,6 +599,7 @@ static inline void vma_complete(struct vma_prepare *vp,
 
        if (vp->remove) {
 again:
+               vma_mark_detached(vp->remove, true);
                if (vp->file) {
                        uprobe_munmap(vp->remove, vp->remove->vm_start,
                                      vp->remove->vm_end);
@@ -2276,6 +2277,7 @@ static inline int munmap_sidetree(struct vm_area_struct *vma,
        if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
                return -ENOMEM;
 
+       vma_mark_detached(vma, true);
        if (vma->vm_flags & VM_LOCKED)
                vma->vm_mm->locked_vm -= vma_pages(vma);