sched: use maple tree iterator to walk VMAs
author Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 6 Sep 2022 19:48:59 +0000 (19:48 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 27 Sep 2022 02:46:22 +0000 (19:46 -0700)
Walking the VMAs via the linked list is slower than walking them via the
maple tree.  We can't use the VMA iterator here because it doesn't support
moving to an earlier position.
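
For context, the pattern the patch switches to is the plain maple tree
iterator: position the state with mas_set() and advance with mas_find().
Below is a minimal sketch of such a walk, not part of the patch; the
wrapper function and the loop body are illustrative, while MA_STATE(),
mas_set(), mas_find() and mm->mm_mt are the same maple tree API the diff
uses:

	/*
	 * Sketch: visit every VMA of "mm" at or after "start", in address
	 * order, using the maple tree iterator.  Caller is assumed to hold
	 * the mmap read lock.  The per-VMA work is a placeholder.
	 */
	static void walk_vmas_from(struct mm_struct *mm, unsigned long start)
	{
		MA_STATE(mas, &mm->mm_mt, 0, 0);
		struct vm_area_struct *vma;

		mas_set(&mas, start);	/* begin the walk at "start" */
		/* mas_find() returns the next VMA up to ULONG_MAX, or NULL */
		for (vma = mas_find(&mas, ULONG_MAX); vma;
		     vma = mas_find(&mas, ULONG_MAX)) {
			/* ... per-VMA work goes here ... */
		}
	}

Unlike the VMA iterator, the raw mas_set()/mas_find() pair allows the
walk to be repositioned to a lower address, which task_numa_work() needs
when it wraps the scan back to address 0.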

Link: https://lkml.kernel.org/r/20220906194824.2110408-49-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
kernel/sched/fair.c

index 0e3e08a093d497319bcc8aef941fa1a6bac946d4..ff49f28391ea380f95a3b3fad8362ee45124984b 100644
@@ -2930,6 +2930,7 @@ static void task_numa_work(struct callback_head *work)
        struct task_struct *p = current;
        struct mm_struct *mm = p->mm;
        u64 runtime = p->se.sum_exec_runtime;
+       MA_STATE(mas, &mm->mm_mt, 0, 0);
        struct vm_area_struct *vma;
        unsigned long start, end;
        unsigned long nr_pte_updates = 0;
@@ -2986,13 +2987,16 @@ static void task_numa_work(struct callback_head *work)
 
        if (!mmap_read_trylock(mm))
                return;
-       vma = find_vma(mm, start);
+       mas_set(&mas, start);
+       vma = mas_find(&mas, ULONG_MAX);
        if (!vma) {
                reset_ptenuma_scan(p);
                start = 0;
-               vma = mm->mmap;
+               mas_set(&mas, start);
+               vma = mas_find(&mas, ULONG_MAX);
        }
-       for (; vma; vma = vma->vm_next) {
+
+       for (; vma; vma = mas_find(&mas, ULONG_MAX)) {
                if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
                        is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
                        continue;