asm-generic/tlb: Remove arch_tlb*_mmu()
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Thu, 20 Sep 2018 08:50:11 +0000 (10:50 +0200)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 3 Apr 2019 08:32:58 +0000 (10:32 +0200)
Now that all architectures are converted to the generic mmu_gather code,
remove the arch_tlb_gather_mmu() and arch_tlb_finish_mmu() hooks by folding
their bodies into tlb_gather_mmu() and tlb_finish_mmu().

No change in behavior intended.
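
For context, callers keep using the generic entry points exactly as
before; a minimal sketch of the usual pattern (mirroring what
unmap_region() in mm/mmap.c does, with details elided):

    struct mmu_gather tlb;

    tlb_gather_mmu(&tlb, mm, start, end);  /* init batching state, inc_tlb_flush_pending() */
    unmap_vmas(&tlb, vma, start, end);     /* zap PTEs, queue pages for freeing */
    tlb_finish_mmu(&tlb, start, end);      /* flush TLB, free batches, dec_tlb_flush_pending() */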

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index ab220edcd7ef578d4dbf426301f2ad0a2a30f2cb..60ef38d200c07300bb4470c8e261cdfb7011e2d8 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -93,33 +93,6 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 
 #endif /* HAVE_MMU_GATHER_NO_GATHER */
 
-void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-                               unsigned long start, unsigned long end)
-{
-       tlb->mm = mm;
-
-       /* Is it from 0 to ~0? */
-       tlb->fullmm     = !(start | (end+1));
-
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
-       tlb->need_flush_all = 0;
-       tlb->local.next = NULL;
-       tlb->local.nr   = 0;
-       tlb->local.max  = ARRAY_SIZE(tlb->__pages);
-       tlb->active     = &tlb->local;
-       tlb->batch_count = 0;
-#endif
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-       tlb->batch = NULL;
-#endif
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
-       tlb->page_size = 0;
-#endif
-
-       __tlb_reset_range(tlb);
-}
-
 void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -136,27 +109,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
        tlb_flush_mmu_free(tlb);
 }
 
-/* tlb_finish_mmu
- *     Called at the end of the shootdown operation to free up any resources
- *     that were required.
- */
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
-               unsigned long start, unsigned long end, bool force)
-{
-       if (force) {
-               __tlb_reset_range(tlb);
-               __tlb_adjust_range(tlb, start, end - start);
-       }
-
-       tlb_flush_mmu(tlb);
-
-       /* keep the page table cache within bounds */
-       check_pgt_cache();
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
-       tlb_batch_list_free(tlb);
-#endif
-}
-
 #endif /* HAVE_GENERIC_MMU_GATHER */
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -258,10 +210,40 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                        unsigned long start, unsigned long end)
 {
-       arch_tlb_gather_mmu(tlb, mm, start, end);
+       tlb->mm = mm;
+
+       /* Is it from 0 to ~0? */
+       tlb->fullmm     = !(start | (end+1));
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+       tlb->need_flush_all = 0;
+       tlb->local.next = NULL;
+       tlb->local.nr   = 0;
+       tlb->local.max  = ARRAY_SIZE(tlb->__pages);
+       tlb->active     = &tlb->local;
+       tlb->batch_count = 0;
+#endif
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+       tlb->batch = NULL;
+#endif
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+       tlb->page_size = 0;
+#endif
+
+       __tlb_reset_range(tlb);
        inc_tlb_flush_pending(tlb->mm);
 }
 
+/**
+ * tlb_finish_mmu - finish an mmu_gather structure
+ * @tlb: the mmu_gather structure to finish
+ * @start: start of the region that will be removed from the page-table
+ * @end: end of the region that will be removed from the page-table
+ *
+ * Called at the end of the shootdown operation to free up any resources that
+ * were required.
+ */
 void tlb_finish_mmu(struct mmu_gather *tlb,
                unsigned long start, unsigned long end)
 {
@@ -272,8 +254,17 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
         * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
         * forcefully if we detect parallel PTE batching threads.
         */
-       bool force = mm_tlb_flush_nested(tlb->mm);
+       if (mm_tlb_flush_nested(tlb->mm)) {
+               __tlb_reset_range(tlb);
+               __tlb_adjust_range(tlb, start, end - start);
+       }
 
-       arch_tlb_finish_mmu(tlb, start, end, force);
+       tlb_flush_mmu(tlb);
+
+       /* keep the page table cache within bounds */
+       check_pgt_cache();
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+       tlb_batch_list_free(tlb);
+#endif
        dec_tlb_flush_pending(tlb->mm);
 }
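
An aside on the fullmm test carried over above: !(start | (end + 1)) is a
branchless way of checking start == 0 && end == ~0UL, i.e. a full
address-space teardown. A standalone userspace illustration of the trick
(hypothetical values, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long start = 0, end = ~0UL;

            /*
             * end + 1 wraps to 0 exactly when end is ~0UL, so the OR is
             * zero (and the ! yields 1) only for the full 0..~0 range.
             */
            printf("fullmm = %d\n", !(start | (end + 1)));  /* prints 1 */
            return 0;
    }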