task_mmu: convert to vma iterator
author Liam R. Howlett <Liam.Howlett@Oracle.com>
Fri, 20 Jan 2023 16:26:22 +0000 (11:26 -0500)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 10 Feb 2023 00:51:34 +0000 (16:51 -0800)
Use the vma iterator so that the iterator can be invalidated or updated to
avoid each caller doing so.

Update the comments to reflect how the vma iterator works.  The vma iterator
will keep track of the last vm_end and start the search from vm_end + 1.

Link: https://lkml.kernel.org/r/20230120162650.984577-22-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/proc/task_mmu.c

index a44339a77a75e126c1851690f57d8321841772db..a944e1816364dd8ba9e0ceccfebed1fc1f262414 100644 (file)
@@ -890,7 +890,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
        struct vm_area_struct *vma;
        unsigned long vma_start = 0, last_vma_end = 0;
        int ret = 0;
-       MA_STATE(mas, &mm->mm_mt, 0, 0);
+       VMA_ITERATOR(vmi, mm, 0);
 
        priv->task = get_proc_task(priv->inode);
        if (!priv->task)
@@ -908,7 +908,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
                goto out_put_mm;
 
        hold_task_mempolicy(priv);
-       vma = mas_find(&mas, ULONG_MAX);
+       vma = vma_next(&vmi);
 
        if (unlikely(!vma))
                goto empty_set;
@@ -923,7 +923,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
                 * access it for write request.
                 */
                if (mmap_lock_is_contended(mm)) {
-                       mas_pause(&mas);
+                       vma_iter_invalidate(&vmi);
                        mmap_read_unlock(mm);
                        ret = mmap_read_lock_killable(mm);
                        if (ret) {
@@ -948,31 +948,31 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
                         *
                         * 1) VMA2 is freed, but VMA3 exists:
                         *
-                        *    find_vma(mm, 16k - 1) will return VMA3.
+                        *    vma_next(vmi) will return VMA3.
                         *    In this case, just continue from VMA3.
                         *
                         * 2) VMA2 still exists:
                         *
-                        *    find_vma(mm, 16k - 1) will return VMA2.
-                        *    Iterate the loop like the original one.
+                        *    vma_next(vmi) will return VMA3.
+                        *    In this case, just continue from VMA3.
                         *
                         * 3) No more VMAs can be found:
                         *
-                        *    find_vma(mm, 16k - 1) will return NULL.
+                        *    vma_next(vmi) will return NULL.
                         *    No more things to do, just break.
                         *
                         * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
                         *
-                        *    find_vma(mm, 16k - 1) will return VMA' whose range
+                        *    vma_next(vmi) will return VMA' whose range
                         *    contains last_vma_end.
                         *    Iterate VMA' from last_vma_end.
                         */
-                       vma = mas_find(&mas, ULONG_MAX);
+                       vma = vma_next(&vmi);
                        /* Case 3 above */
                        if (!vma)
                                break;
 
-                       /* Case 1 above */
+                       /* Case 1 and 2 above */
                        if (vma->vm_start >= last_vma_end)
                                continue;
 
@@ -980,8 +980,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
                        if (vma->vm_end > last_vma_end)
                                smap_gather_stats(vma, &mss, last_vma_end);
                }
-               /* Case 2 above */
-       } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
+       } for_each_vma(vmi, vma);
 
 empty_set:
        show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
@@ -1277,7 +1276,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                return -ESRCH;
        mm = get_task_mm(task);
        if (mm) {
-               MA_STATE(mas, &mm->mm_mt, 0, 0);
+               VMA_ITERATOR(vmi, mm, 0);
                struct mmu_notifier_range range;
                struct clear_refs_private cp = {
                        .type = type,
@@ -1297,7 +1296,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                }
 
                if (type == CLEAR_REFS_SOFT_DIRTY) {
-                       mas_for_each(&mas, vma, ULONG_MAX) {
+                       for_each_vma(vmi, vma) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
                                        continue;
                                vma->vm_flags &= ~VM_SOFTDIRTY;