mm/munmap: replace can_modify_mm with can_modify_vma
Author: Pedro Falcato <pedro.falcato@gmail.com>
Sat, 17 Aug 2024 00:18:29 +0000 (01:18 +0100)
Committer: Andrew Morton <akpm@linux-foundation.org>
Wed, 4 Sep 2024 04:15:41 +0000 (21:15 -0700)
We were doing an extra mmap tree traversal just to check if the entire
range is modifiable.  This can be done when we iterate through the VMAs
instead.

Link: https://lkml.kernel.org/r/20240817-mseal-depessimize-v3-2-d8d2e037df30@gmail.com
Signed-off-by: Pedro Falcato <pedro.falcato@gmail.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Jeff Xu <jeffxu@chromium.org>
Cc: Kees Cook <kees@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mmap.c
mm/vma.c

index 7cba84f8e3a5898fae41cfe41f0c2e85d2584c99..548bc45a27bf82194445a67a3027b9e4c6c4aee3 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1740,16 +1740,7 @@ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
                unsigned long start, unsigned long end, struct list_head *uf,
                bool unlock)
 {
-       struct mm_struct *mm = vma->vm_mm;
-
-       /*
-        * Check if memory is sealed, prevent unmapping a sealed VMA.
-        * can_modify_mm assumes we have acquired the lock on MM.
-        */
-       if (unlikely(!can_modify_mm(mm, start, end)))
-               return -EPERM;
-
-       return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
+       return do_vmi_align_munmap(vmi, vma, vma->vm_mm, start, end, uf, unlock);
 }
 
 /*
index 84965f2cd5800d335a44f00e7eab1dc52d627b08..5850f7c0949b027f1a103d25cba7898ecdfbd2b8 100644 (file)
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -712,6 +712,12 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
                if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
                        goto map_count_exceeded;
 
+               /* Don't bother splitting the VMA if we can't unmap it anyway */
+               if (!can_modify_vma(vma)) {
+                       error = -EPERM;
+                       goto start_split_failed;
+               }
+
                error = __split_vma(vmi, vma, start, 1);
                if (error)
                        goto start_split_failed;
@@ -723,6 +729,11 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
         */
        next = vma;
        do {
+               if (!can_modify_vma(next)) {
+                       error = -EPERM;
+                       goto modify_vma_failed;
+               }
+
                /* Does it split the end? */
                if (next->vm_end > end) {
                        error = __split_vma(vmi, next, end, 0);
@@ -815,6 +826,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
        __mt_destroy(&mt_detach);
        return 0;
 
+modify_vma_failed:
 clear_tree_failed:
 userfaultfd_error:
 munmap_gather_failed:
@@ -860,13 +872,6 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
        if (end == start)
                return -EINVAL;
 
-       /*
-        * Check if memory is sealed, prevent unmapping a sealed VMA.
-        * can_modify_mm assumes we have acquired the lock on MM.
-        */
-       if (unlikely(!can_modify_mm(mm, start, end)))
-               return -EPERM;
-
        /* Find the first overlapping VMA */
        vma = vma_find(vmi, end);
        if (!vma) {