index 3490af70f2592e05eb5740f5b6e9427c5ca3942f..83b4682ec85cfa0398ab0e50f9c4c8f35f993d90 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1114,21 +1114,21 @@ static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_
  */
 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
 {
-       MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end);
        struct anon_vma *anon_vma = NULL;
        struct vm_area_struct *prev, *next;
+       VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
 
        /* Try next first. */
-       next = mas_walk(&mas);
+       next = vma_iter_load(&vmi);
        if (next) {
                anon_vma = reusable_anon_vma(next, vma, next);
                if (anon_vma)
                        return anon_vma;
        }
 
-       prev = mas_prev(&mas, 0);
+       prev = vma_prev(&vmi);
        VM_BUG_ON_VMA(prev != vma, vma);
-       prev = mas_prev(&mas, 0);
+       prev = vma_prev(&vmi);
        /* Try prev next. */
        if (prev)
                anon_vma = reusable_anon_vma(prev, prev, vma);
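
The hunk above is a mechanical conversion: mas_walk()/mas_prev() on a raw MA_STATE become vma_iter_load()/vma_prev() on a VMA_ITERATOR. A minimal sketch of the neighbour walk it performs (illustrative only, not part of the patch; the vma_iter_*() helpers are the same ones used elsewhere in this file):

static void neighbour_walk_sketch(struct vm_area_struct *vma)
{
	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
	struct vm_area_struct *prev, *next;

	next = vma_iter_load(&vmi);	/* VMA starting at vma->vm_end, if any */
	prev = vma_prev(&vmi);		/* first step back lands on vma itself */
	prev = vma_prev(&vmi);		/* second step reaches the real predecessor */

	/* find_mergeable_anon_vma() feeds next and prev into reusable_anon_vma(). */
	(void)next;
	(void)prev;
}
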
@@ -1255,17 +1255,15 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
        if (mm->map_count > sysctl_max_map_count)
                return -ENOMEM;
 
-       /* Obtain the address to map to. we verify (or select) it and ensure
-        * that it represents a valid section of the address space.
+       /*
+        * addr is returned from get_unmapped_area.
+        * There are two cases:
+        * 1> MAP_FIXED == false
+        *      unallocated memory, no need to check sealing.
+        * 2> MAP_FIXED == true
+        *      sealing is checked inside mmap_region when
+        *      do_vmi_munmap is called.
         */
-       addr = get_unmapped_area(file, addr, len, pgoff, flags);
-       if (IS_ERR_VALUE(addr))
-               return addr;
-
-       if (flags & MAP_FIXED_NOREPLACE) {
-               if (find_vma_intersection(mm, addr, addr + len))
-                       return -EEXIST;
-       }
 
        if (prot == PROT_EXEC) {
                pkey = execute_only_pkey(mm);
@@ -1280,6 +1278,18 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
        vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
                        mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
+       /* Obtain the address to map to. We verify (or select) it and ensure
+        * that it represents a valid section of the address space.
+        */
+       addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags);
+       if (IS_ERR_VALUE(addr))
+               return addr;
+
+       if (flags & MAP_FIXED_NOREPLACE) {
+               if (find_vma_intersection(mm, addr, addr + len))
+                       return -EEXIST;
+       }
+
        if (flags & MAP_LOCKED)
                if (!can_do_mlock())
                        return -EPERM;
@@ -1516,32 +1526,32 @@ bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
  * to the private version (using protection_map[] without the
  * VM_SHARED bit).
  */
-int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
+bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
 {
        /* If it was private or non-writable, the write bit is already clear */
        if (!vma_is_shared_writable(vma))
-               return 0;
+               return false;
 
        /* The backer wishes to know when pages are first written to? */
        if (vm_ops_needs_writenotify(vma->vm_ops))
-               return 1;
+               return true;
 
        /* The open routine did something to the protections that pgprot_modify
         * won't preserve? */
        if (pgprot_val(vm_page_prot) !=
            pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
-               return 0;
+               return false;
 
        /*
         * Do we need to track softdirty? hugetlb does not support softdirty
         * tracking yet.
         */
        if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
-               return 1;
+               return true;
 
        /* Do we need write faults for uffd-wp tracking? */
        if (userfaultfd_wp(vma))
-               return 1;
+               return true;
 
        /* Can the mapping track the dirty pages? */
        return vma_fs_can_writeback(vma);
@@ -1551,14 +1561,14 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
  * We account for memory if it's a private writeable mapping,
  * not hugepages and VM_NORESERVE wasn't set.
  */
-static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
+static inline bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
 {
        /*
         * hugetlb has its own accounting separate from the core VM
         * VM_HUGETLB may not be set yet so we cannot check for that flag.
         */
        if (file && is_file_hugepages(file))
-               return 0;
+               return false;
 
        return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
 }
@@ -1578,11 +1588,10 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
        unsigned long length, gap;
        unsigned long low_limit, high_limit;
        struct vm_area_struct *tmp;
-
-       MA_STATE(mas, &current->mm->mm_mt, 0, 0);
+       VMA_ITERATOR(vmi, current->mm, 0);
 
        /* Adjust search length to account for worst case alignment overhead */
-       length = info->length + info->align_mask;
+       length = info->length + info->align_mask + info->start_gap;
        if (length < info->length)
                return -ENOMEM;
 
@@ -1591,23 +1600,29 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
                low_limit = mmap_min_addr;
        high_limit = info->high_limit;
 retry:
-       if (mas_empty_area(&mas, low_limit, high_limit - 1, length))
+       if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
                return -ENOMEM;
 
-       gap = mas.index;
+       /*
+        * Adjust for the gap first so it doesn't interfere with the
+        * later alignment. The first step is the minimum needed to
+        * fulfill the start gap, the next step is the minimum to align
+        * that. It is the minimum needed to fulfill both.
+        */
+       gap = vma_iter_addr(&vmi) + info->start_gap;
        gap += (info->align_offset - gap) & info->align_mask;
-       tmp = mas_next(&mas, ULONG_MAX);
+       tmp = vma_next(&vmi);
        if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
                if (vm_start_gap(tmp) < gap + length - 1) {
                        low_limit = tmp->vm_end;
-                       mas_reset(&mas);
+                       vma_iter_reset(&vmi);
                        goto retry;
                }
        } else {
-               tmp = mas_prev(&mas, 0);
+               tmp = vma_prev(&vmi);
                if (tmp && vm_end_gap(tmp) > gap) {
                        low_limit = vm_end_gap(tmp);
-                       mas_reset(&mas);
+                       vma_iter_reset(&vmi);
                        goto retry;
                }
        }
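
The gap computation above packs the start-gap and alignment adjustments into two lines; a standalone sketch of the same arithmetic with made-up numbers (not part of the patch) may make it easier to follow:

/* Round a candidate base up so it honours both the start gap and the alignment. */
static unsigned long place_bottom_up(unsigned long area_base, unsigned long start_gap,
				     unsigned long align_mask, unsigned long align_offset)
{
	unsigned long gap = area_base + start_gap;

	gap += (align_offset - gap) & align_mask;	/* round up to the alignment */
	return gap;
}

/*
 * Example: area_base = 0x10000, start_gap = 0x100, 2 MiB alignment
 * (align_mask = 0x1fffff, align_offset = 0) gives 0x200000, the lowest
 * address satisfying both constraints.
 */
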
@@ -1630,10 +1645,10 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
        unsigned long length, gap, gap_end;
        unsigned long low_limit, high_limit;
        struct vm_area_struct *tmp;
+       VMA_ITERATOR(vmi, current->mm, 0);
 
-       MA_STATE(mas, &current->mm->mm_mt, 0, 0);
        /* Adjust search length to account for worst case alignment overhead */
-       length = info->length + info->align_mask;
+       length = info->length + info->align_mask + info->start_gap;
        if (length < info->length)
                return -ENOMEM;
 
@@ -1642,24 +1657,24 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
                low_limit = mmap_min_addr;
        high_limit = info->high_limit;
 retry:
-       if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length))
+       if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
                return -ENOMEM;
 
-       gap = mas.last + 1 - info->length;
+       gap = vma_iter_end(&vmi) - info->length;
        gap -= (gap - info->align_offset) & info->align_mask;
-       gap_end = mas.last;
-       tmp = mas_next(&mas, ULONG_MAX);
+       gap_end = vma_iter_end(&vmi);
+       tmp = vma_next(&vmi);
        if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
-               if (vm_start_gap(tmp) <= gap_end) {
+               if (vm_start_gap(tmp) < gap_end) {
                        high_limit = vm_start_gap(tmp);
-                       mas_reset(&mas);
+                       vma_iter_reset(&vmi);
                        goto retry;
                }
        } else {
-               tmp = mas_prev(&mas, 0);
+               tmp = vma_prev(&vmi);
                if (tmp && vm_end_gap(tmp) > gap) {
                        high_limit = tmp->vm_start;
-                       mas_reset(&mas);
+                       vma_iter_reset(&vmi);
                        goto retry;
                }
        }
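
unmapped_area_topdown() does the mirror image, rounding the candidate down instead of up; again a sketch with illustrative names only:

static unsigned long place_top_down(unsigned long area_end, unsigned long length,
				    unsigned long align_mask, unsigned long align_offset)
{
	unsigned long gap = area_end - length;

	gap -= (gap - align_offset) & align_mask;	/* round down to the alignment */
	return gap;
}

/*
 * Example: area_end = 0x250000, length = 0x1000, 2 MiB alignment gives
 * 0x200000; the mapping [0x200000, 0x201000) still fits below area_end.
 */
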
@@ -1707,7 +1722,7 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
        const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
 
        if (len > mmap_end - mmap_min_addr)
@@ -1725,12 +1740,9 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
                        return addr;
        }
 
-       info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = mmap_end;
-       info.align_mask = 0;
-       info.align_offset = 0;
        return vm_unmapped_area(&info);
 }
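
The move to an empty initializer is not just cosmetic: vm_unmapped_area() now also reads the new start_gap member, and the initializer guarantees it (and any future field) starts at zero without every caller being touched. A short sketch of the resulting idiom (mirroring the function above; the start_gap value is hypothetical):

	struct vm_unmapped_area_info info = {};	/* all members zero, including start_gap */

	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = mmap_end;
	/* A caller that needs free space left in front of the mapping would add: */
	/* info.start_gap = SZ_64K; */
	return vm_unmapped_area(&info);
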
 
@@ -1755,7 +1767,7 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 {
        struct vm_area_struct *vma, *prev;
        struct mm_struct *mm = current->mm;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
        const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
 
        /* requested length too big for entire address space */
@@ -1779,8 +1791,6 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
-       info.align_mask = 0;
-       info.align_offset = 0;
        addr = vm_unmapped_area(&info);
 
        /*
@@ -1810,12 +1820,41 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 }
 #endif
 
+#ifndef HAVE_ARCH_UNMAPPED_AREA_VMFLAGS
+unsigned long
+arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len,
+                              unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
+{
+       return arch_get_unmapped_area(filp, addr, len, pgoff, flags);
+}
+
 unsigned long
-get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
-               unsigned long pgoff, unsigned long flags)
+arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr,
+                                      unsigned long len, unsigned long pgoff,
+                                      unsigned long flags, vm_flags_t vm_flags)
+{
+       return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
+}
+#endif
+
+unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp,
+                                          unsigned long addr, unsigned long len,
+                                          unsigned long pgoff, unsigned long flags,
+                                          vm_flags_t vm_flags)
+{
+       if (test_bit(MMF_TOPDOWN, &mm->flags))
+               return arch_get_unmapped_area_topdown_vmflags(filp, addr, len, pgoff,
+                                                             flags, vm_flags);
+       return arch_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, vm_flags);
+}
+
+unsigned long
+__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+               unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
 {
        unsigned long (*get_area)(struct file *, unsigned long,
-                                 unsigned long, unsigned long, unsigned long);
+                                 unsigned long, unsigned long, unsigned long)
+                                 = NULL;
 
        unsigned long error = arch_mmap_check(addr, len, flags);
        if (error)
@@ -1825,7 +1864,6 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        if (len > TASK_SIZE)
                return -ENOMEM;
 
-       get_area = current->mm->get_unmapped_area;
        if (file) {
                if (file->f_op->get_unmapped_area)
                        get_area = file->f_op->get_unmapped_area;
@@ -1835,16 +1873,22 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                 * so use shmem's get_unmapped_area in case it can be huge.
                 */
                get_area = shmem_get_unmapped_area;
-       } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
-               /* Ensures that larger anonymous mappings are THP aligned. */
-               get_area = thp_get_unmapped_area;
        }
 
        /* Always treat pgoff as zero for anonymous memory. */
        if (!file)
                pgoff = 0;
 
-       addr = get_area(file, addr, len, pgoff, flags);
+       if (get_area) {
+               addr = get_area(file, addr, len, pgoff, flags);
+       } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+               /* Ensures that larger anonymous mappings are THP aligned. */
+               addr = thp_get_unmapped_area_vmflags(file, addr, len,
+                                                    pgoff, flags, vm_flags);
+       } else {
+               addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len,
+                                                   pgoff, flags, vm_flags);
+       }
        if (IS_ERR_VALUE(addr))
                return addr;
 
@@ -1857,7 +1901,16 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        return error ? error : addr;
 }
 
-EXPORT_SYMBOL(get_unmapped_area);
+unsigned long
+mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
+                    unsigned long addr, unsigned long len,
+                    unsigned long pgoff, unsigned long flags)
+{
+       if (test_bit(MMF_TOPDOWN, &mm->flags))
+               return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags);
+       return arch_get_unmapped_area(file, addr, len, pgoff, flags);
+}
+EXPORT_SYMBOL(mm_get_unmapped_area);
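
mm_get_unmapped_area() replaces the old current->mm->get_unmapped_area function pointer: the choice between bottom-up and top-down placement now hangs off the MMF_TOPDOWN mm flag. A sketch of how an out-of-line user, such as a driver's ->get_unmapped_area hook, might call the exported helper (the function name below is illustrative, not from the patch):

static unsigned long
example_mmap_hook(struct file *file, unsigned long addr, unsigned long len,
		  unsigned long pgoff, unsigned long flags)
{
	/* Apply any driver-specific constraints on addr/len here, then defer. */
	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
}
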
 
 /**
  * find_vma_intersection() - Look up the first VMA which intersects the interval
@@ -1914,12 +1967,12 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
                        struct vm_area_struct **pprev)
 {
        struct vm_area_struct *vma;
-       MA_STATE(mas, &mm->mm_mt, addr, addr);
+       VMA_ITERATOR(vmi, mm, addr);
 
-       vma = mas_walk(&mas);
-       *pprev = mas_prev(&mas, 0);
+       vma = vma_iter_load(&vmi);
+       *pprev = vma_prev(&vmi);
        if (!vma)
-               vma = mas_next(&mas, ULONG_MAX);
+               vma = vma_next(&vmi);
        return vma;
 }
 
@@ -1973,7 +2026,7 @@ static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        struct vm_area_struct *next;
        unsigned long gap_addr;
        int error = 0;
-       MA_STATE(mas, &mm->mm_mt, vma->vm_start, address);
+       VMA_ITERATOR(vmi, mm, vma->vm_start);
 
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
@@ -1999,15 +2052,15 @@ static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        }
 
        if (next)
-               mas_prev_range(&mas, address);
+               vma_iter_prev_range_limit(&vmi, address);
 
-       __mas_set_range(&mas, vma->vm_start, address - 1);
-       if (mas_preallocate(&mas, vma, GFP_KERNEL))
+       vma_iter_config(&vmi, vma->vm_start, address);
+       if (vma_iter_prealloc(&vmi, vma))
                return -ENOMEM;
 
        /* We must make sure the anon_vma is allocated. */
        if (unlikely(anon_vma_prepare(vma))) {
-               mas_destroy(&mas);
+               vma_iter_free(&vmi);
                return -ENOMEM;
        }
 
@@ -2047,7 +2100,7 @@ static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                                anon_vma_interval_tree_pre_update_vma(vma);
                                vma->vm_end = address;
                                /* Overwrite old entry in mtree. */
-                               mas_store_prealloc(&mas, vma);
+                               vma_iter_store(&vmi, vma);
                                anon_vma_interval_tree_post_update_vma(vma);
                                spin_unlock(&mm->page_table_lock);
 
@@ -2056,7 +2109,7 @@ static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                }
        }
        anon_vma_unlock_write(vma->anon_vma);
-       mas_destroy(&mas);
+       vma_iter_free(&vmi);
        validate_mm(mm);
        return error;
 }
@@ -2069,9 +2122,9 @@ static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
 {
        struct mm_struct *mm = vma->vm_mm;
-       MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start);
        struct vm_area_struct *prev;
        int error = 0;
+       VMA_ITERATOR(vmi, mm, vma->vm_start);
 
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return -EFAULT;
@@ -2081,7 +2134,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
                return -EPERM;
 
        /* Enforce stack_guard_gap */
-       prev = mas_prev(&mas, 0);
+       prev = vma_prev(&vmi);
        /* Check that both stack segments have the same anon_vma? */
        if (prev) {
                if (!(prev->vm_flags & VM_GROWSDOWN) &&
@@ -2091,15 +2144,15 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
        }
 
        if (prev)
-               mas_next_range(&mas, vma->vm_start);
+               vma_iter_next_range_limit(&vmi, vma->vm_start);
 
-       __mas_set_range(&mas, address, vma->vm_end - 1);
-       if (mas_preallocate(&mas, vma, GFP_KERNEL))
+       vma_iter_config(&vmi, address, vma->vm_end);
+       if (vma_iter_prealloc(&vmi, vma))
                return -ENOMEM;
 
        /* We must make sure the anon_vma is allocated. */
        if (unlikely(anon_vma_prepare(vma))) {
-               mas_destroy(&mas);
+               vma_iter_free(&vmi);
                return -ENOMEM;
        }
 
@@ -2140,7 +2193,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
                                vma->vm_start = address;
                                vma->vm_pgoff -= grow;
                                /* Overwrite old entry in mtree. */
-                               mas_store_prealloc(&mas, vma);
+                               vma_iter_store(&vmi, vma);
                                anon_vma_interval_tree_post_update_vma(vma);
                                spin_unlock(&mm->page_table_lock);
 
@@ -2149,7 +2202,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
                }
        }
        anon_vma_unlock_write(vma->anon_vma);
-       mas_destroy(&mas);
+       vma_iter_free(&vmi);
        validate_mm(mm);
        return error;
 }
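
Both stack-expansion paths above now follow the same preallocate/store/free shape around the maple tree update. A condensed sketch of that pattern (illustrative; the VMA adjustment and its locking are elided):

static int resize_vma_sketch(struct vma_iterator *vmi, struct vm_area_struct *vma,
			     unsigned long new_start, unsigned long new_end)
{
	/* Reserve tree nodes up front so the later store cannot fail. */
	vma_iter_config(vmi, new_start, new_end);
	if (vma_iter_prealloc(vmi, vma))
		return -ENOMEM;

	/* ... adjust vma->vm_start / vma->vm_end under the proper locks ... */
	vma_iter_store(vmi, vma);	/* overwrite the old entry in the mtree */

	vma_iter_free(vmi);		/* release any unused preallocations */
	return 0;
}
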
@@ -2684,6 +2737,14 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
        if (end == start)
                return -EINVAL;
 
+       /*
+        * Check if the memory range is sealed before arch_unmap();
+        * a sealed VMA must not be unmapped.
+        * can_modify_mm() assumes the mmap lock is already held.
+        */
+       if (unlikely(!can_modify_mm(mm, start, end)))
+               return -EPERM;
+
         /* arch_unmap() might do unmaps itself.  */
        arch_unmap(mm, start, end);
 
@@ -2746,7 +2807,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        }
 
        /* Unmap any existing mapping in the area */
-       if (do_vmi_munmap(&vmi, mm, addr, len, uf, false))
+       error = do_vmi_munmap(&vmi, mm, addr, len, uf, false);
+       if (error == -EPERM)
+               return error;
+       else if (error)
                return -ENOMEM;
 
        /*
@@ -3096,6 +3160,14 @@ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 {
        struct mm_struct *mm = vma->vm_mm;
 
+       /*
+        * Check if the memory range is sealed before arch_unmap();
+        * a sealed VMA must not be unmapped.
+        * can_modify_mm() assumes the mmap lock is already held.
+        */
+       if (unlikely(!can_modify_mm(mm, start, end)))
+               return -EPERM;
+
        arch_unmap(mm, start, end);
        return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
 }
@@ -3244,7 +3316,7 @@ void exit_mmap(struct mm_struct *mm)
        struct mmu_gather tlb;
        struct vm_area_struct *vma;
        unsigned long nr_accounted = 0;
-       MA_STATE(mas, &mm->mm_mt, 0, 0);
+       VMA_ITERATOR(vmi, mm, 0);
        int count = 0;
 
        /* mm's last user has gone, and its about to be pulled down */
@@ -3253,7 +3325,7 @@ void exit_mmap(struct mm_struct *mm)
        mmap_read_lock(mm);
        arch_exit_mmap(mm);
 
-       vma = mas_find(&mas, ULONG_MAX);
+       vma = vma_next(&vmi);
        if (!vma || unlikely(xa_is_zero(vma))) {
                /* Can happen if dup_mmap() received an OOM */
                mmap_read_unlock(mm);
@@ -3266,7 +3338,7 @@ void exit_mmap(struct mm_struct *mm)
        tlb_gather_mmu_fullmm(&tlb, mm);
        /* update_hiwater_rss(mm) here? but nobody should be looking */
        /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
-       unmap_vmas(&tlb, &mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
+       unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
        mmap_read_unlock(mm);
 
        /*
@@ -3276,8 +3348,8 @@ void exit_mmap(struct mm_struct *mm)
        set_bit(MMF_OOM_SKIP, &mm->flags);
        mmap_write_lock(mm);
        mt_clear_in_rcu(&mm->mm_mt);
-       mas_set(&mas, vma->vm_end);
-       free_pgtables(&tlb, &mas, vma, FIRST_USER_ADDRESS,
+       vma_iter_set(&vmi, vma->vm_end);
+       free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
                      USER_PGTABLES_CEILING, true);
        tlb_finish_mmu(&tlb);
 
@@ -3286,14 +3358,14 @@ void exit_mmap(struct mm_struct *mm)
         * enabled, without holding any MM locks besides the unreachable
         * mmap_write_lock.
         */
-       mas_set(&mas, vma->vm_end);
+       vma_iter_set(&vmi, vma->vm_end);
        do {
                if (vma->vm_flags & VM_ACCOUNT)
                        nr_accounted += vma_pages(vma);
                remove_vma(vma, true);
                count++;
                cond_resched();
-               vma = mas_find(&mas, ULONG_MAX);
+               vma = vma_next(&vmi);
        } while (vma && likely(!xa_is_zero(vma)));
 
        BUG_ON(count != mm->map_count);
@@ -3715,7 +3787,7 @@ int mm_take_all_locks(struct mm_struct *mm)
 {
        struct vm_area_struct *vma;
        struct anon_vma_chain *avc;
-       MA_STATE(mas, &mm->mm_mt, 0, 0);
+       VMA_ITERATOR(vmi, mm, 0);
 
        mmap_assert_write_locked(mm);
 
@@ -3727,14 +3799,14 @@ int mm_take_all_locks(struct mm_struct *mm)
         * being written to until mmap_write_unlock() or mmap_write_downgrade()
         * is reached.
         */
-       mas_for_each(&mas, vma, ULONG_MAX) {
+       for_each_vma(vmi, vma) {
                if (signal_pending(current))
                        goto out_unlock;
                vma_start_write(vma);
        }
 
-       mas_set(&mas, 0);
-       mas_for_each(&mas, vma, ULONG_MAX) {
+       vma_iter_init(&vmi, mm, 0);
+       for_each_vma(vmi, vma) {
                if (signal_pending(current))
                        goto out_unlock;
                if (vma->vm_file && vma->vm_file->f_mapping &&
@@ -3742,8 +3814,8 @@ int mm_take_all_locks(struct mm_struct *mm)
                        vm_lock_mapping(mm, vma->vm_file->f_mapping);
        }
 
-       mas_set(&mas, 0);
-       mas_for_each(&mas, vma, ULONG_MAX) {
+       vma_iter_init(&vmi, mm, 0);
+       for_each_vma(vmi, vma) {
                if (signal_pending(current))
                        goto out_unlock;
                if (vma->vm_file && vma->vm_file->f_mapping &&
@@ -3751,8 +3823,8 @@ int mm_take_all_locks(struct mm_struct *mm)
                        vm_lock_mapping(mm, vma->vm_file->f_mapping);
        }
 
-       mas_set(&mas, 0);
-       mas_for_each(&mas, vma, ULONG_MAX) {
+       vma_iter_init(&vmi, mm, 0);
+       for_each_vma(vmi, vma) {
                if (signal_pending(current))
                        goto out_unlock;
                if (vma->anon_vma)
@@ -3811,12 +3883,12 @@ void mm_drop_all_locks(struct mm_struct *mm)
 {
        struct vm_area_struct *vma;
        struct anon_vma_chain *avc;
-       MA_STATE(mas, &mm->mm_mt, 0, 0);
+       VMA_ITERATOR(vmi, mm, 0);
 
        mmap_assert_write_locked(mm);
        BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
 
-       mas_for_each(&mas, vma, ULONG_MAX) {
+       for_each_vma(vmi, vma) {
                if (vma->anon_vma)
                        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                                vm_unlock_anon_vma(avc->anon_vma);
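
The lock-taking and teardown loops above all use the same iteration idiom: for_each_vma() walks the tree, and vma_iter_init() rewinds the iterator before it is reused for another pass. A sketch (illustrative only):

static void walk_all_vmas_twice(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma) {
		/* first pass, e.g. vma_start_write(vma) */
	}

	vma_iter_init(&vmi, mm, 0);	/* rewind before a second pass */
	for_each_vma(vmi, vma) {
		/* second pass */
	}
}
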