tlb: mmu_gather: Remove unused start/end arguments from tlb_finish_mmu()
author Will Deacon <will@kernel.org>
Wed, 27 Jan 2021 23:53:43 +0000 (23:53 +0000)
committer Peter Zijlstra <peterz@infradead.org>
Fri, 29 Jan 2021 19:02:28 +0000 (20:02 +0100)
Since commit 7a30df49f63a ("mm: mmu_gather: remove __tlb_reset_range()
for force flush"), the 'start' and 'end' arguments to tlb_finish_mmu()
are no longer used, as we flush the whole mm in the case of a nested
invalidation.

Remove the unused arguments and update all callers.
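
As an example, the unmap path in mm/mmap.c now takes the shape below
(a condensed sketch of the unmap_region() hunk further down; locals and
the surrounding drain/accounting calls are elided):

    struct mmu_gather tlb;

    tlb_gather_mmu(&tlb, mm, start, end);   /* range still passed at gather time */
    unmap_vmas(&tlb, vma, start, end);
    free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
                  next ? next->vm_start : USER_PGTABLES_CEILING);
    tlb_finish_mmu(&tlb);                   /* 'start'/'end' dropped */

Only the final call changes; the range being invalidated is already
tracked inside the mmu_gather itself.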

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Yu Zhao <yuzhao@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lkml.kernel.org/r/20210127235347.1402-3-will@kernel.org
arch/ia64/include/asm/tlb.h
arch/x86/kernel/ldt.c
fs/exec.c
include/linux/mm_types.h
mm/hugetlb.c
mm/madvise.c
mm/memory.c
mm/mmap.c
mm/mmu_gather.c
mm/oom_kill.c

index 8d9da6f08a62ec5ec12d389dc2350958e943ad97..7059eb2e867a342e6e1fd84674e3b516bd47f652 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -36,7 +36,7 @@
  *         tlb_end_vma(tlb, vma);
  *       }
  *     }
- *     tlb_finish_mmu(tlb, start, end);        // finish unmap for address space MM
+ *     tlb_finish_mmu(tlb);                            // finish unmap for address space MM
  */
 #include <linux/mm.h>
 #include <linux/pagemap.h>
index b8aee71840ae507a0b93f3187350c75799dc8f94..0d4e1253c9c9024de21485381c9c313198a28b23 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -400,7 +400,7 @@ static void free_ldt_pgtables(struct mm_struct *mm)
 
        tlb_gather_mmu(&tlb, mm, start, end);
        free_pgd_range(&tlb, start, end, start, end);
-       tlb_finish_mmu(&tlb, start, end);
+       tlb_finish_mmu(&tlb);
 #endif
 }
 
index 5d4d52039105cd3e017681c15f38324fd5681f4e..69d89a0c35e92d318ab406ed9f8e46c3244077ea 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -725,7 +725,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
                free_pgd_range(&tlb, old_start, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        }
-       tlb_finish_mmu(&tlb, old_start, old_end);
+       tlb_finish_mmu(&tlb);
 
        /*
         * Shrink the vma to just the new range.  Always succeeds.
index 07d9acb5b19c4c5c33db90a93815384880f937fd..1fe6a51298a682c16616c3ded1b4e155ff3b90e6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -590,8 +590,7 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 struct mmu_gather;
 extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                                unsigned long start, unsigned long end);
-extern void tlb_finish_mmu(struct mmu_gather *tlb,
-                               unsigned long start, unsigned long end);
+extern void tlb_finish_mmu(struct mmu_gather *tlb);
 
 static inline void init_tlb_flush_pending(struct mm_struct *mm)
 {
index 18f6ee3179002a7f218dfafb4e7724685ff1c215..33db4fa62c7b528ddd9610ced89cca1890e9c82a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3985,7 +3985,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
        tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
        __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
-       tlb_finish_mmu(&tlb, tlb_start, tlb_end);
+       tlb_finish_mmu(&tlb);
 }
 
 /*
index 6a660858784b8410287a2250778c2a4197ae21a1..1b68520ea3f4d40f1c27e91515d2000f222d8990 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -508,7 +508,7 @@ static long madvise_cold(struct vm_area_struct *vma,
        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
        madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
-       tlb_finish_mmu(&tlb, start_addr, end_addr);
+       tlb_finish_mmu(&tlb);
 
        return 0;
 }
@@ -560,7 +560,7 @@ static long madvise_pageout(struct vm_area_struct *vma,
        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
        madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
-       tlb_finish_mmu(&tlb, start_addr, end_addr);
+       tlb_finish_mmu(&tlb);
 
        return 0;
 }
@@ -732,7 +732,7 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
                        &madvise_free_walk_ops, &tlb);
        tlb_end_vma(&tlb, vma);
        mmu_notifier_invalidate_range_end(&range);
-       tlb_finish_mmu(&tlb, range.start, range.end);
+       tlb_finish_mmu(&tlb);
 
        return 0;
 }
index feff48e1465a6ee652d130560617e2425109ee2f..7bd3f122bd10fab8a759fb32cb11c0087cf4b372 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1540,7 +1540,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
        for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
                unmap_single_vma(&tlb, vma, start, range.end, NULL);
        mmu_notifier_invalidate_range_end(&range);
-       tlb_finish_mmu(&tlb, start, range.end);
+       tlb_finish_mmu(&tlb);
 }
 
 /**
@@ -1566,7 +1566,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
        mmu_notifier_invalidate_range_start(&range);
        unmap_single_vma(&tlb, vma, address, range.end, details);
        mmu_notifier_invalidate_range_end(&range);
-       tlb_finish_mmu(&tlb, address, range.end);
+       tlb_finish_mmu(&tlb);
 }
 
 /**
index dc7206032387c65692883519072e357f3cff9152..7a9f493a4b83a376896c3ba2fcfff74c9eba0f15 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2676,7 +2676,7 @@ static void unmap_region(struct mm_struct *mm,
        unmap_vmas(&tlb, vma, start, end);
        free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
                                 next ? next->vm_start : USER_PGTABLES_CEILING);
-       tlb_finish_mmu(&tlb, start, end);
+       tlb_finish_mmu(&tlb);
 }
 
 /*
@@ -3219,7 +3219,7 @@ void exit_mmap(struct mm_struct *mm)
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
        unmap_vmas(&tlb, vma, 0, -1);
        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
-       tlb_finish_mmu(&tlb, 0, -1);
+       tlb_finish_mmu(&tlb);
 
        /*
         * Walk the list again, actually closing and freeing it,
index 03c33c93a582b9044fe7afbf64e5f3978de158ae..b0be5a7aa08f6d054c5f69bd4af4e95955d2192b 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -290,14 +290,11 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 /**
  * tlb_finish_mmu - finish an mmu_gather structure
  * @tlb: the mmu_gather structure to finish
- * @start: start of the region that will be removed from the page-table
- * @end: end of the region that will be removed from the page-table
  *
  * Called at the end of the shootdown operation to free up any resources that
  * were required.
  */
-void tlb_finish_mmu(struct mmu_gather *tlb,
-               unsigned long start, unsigned long end)
+void tlb_finish_mmu(struct mmu_gather *tlb)
 {
        /*
         * If there are parallel threads are doing PTE changes on same range
index 04b19b7b5435bf9827b7139503180e0ce2da6d12..757e557211fb7bcbf88c1d6b8129a6733f65af02 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -548,13 +548,13 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
                                                vma, mm, vma->vm_start,
                                                vma->vm_end);
                        tlb_gather_mmu(&tlb, mm, range.start, range.end);
                        if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
-                               tlb_finish_mmu(&tlb, range.start, range.end);
+                               tlb_finish_mmu(&tlb);
                                ret = false;
                                continue;
                        }
                        unmap_page_range(&tlb, vma, range.start, range.end, NULL);
                        mmu_notifier_invalidate_range_end(&range);
-                       tlb_finish_mmu(&tlb, range.start, range.end);
+                       tlb_finish_mmu(&tlb);
                }
        }