 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
*
- *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly() / tlb_flush_mmu_free()
+ *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
- *    tlb_flush_mmu_free() - frees the queued pages; make absolutely
- *                           sure no additional tlb_remove_page()
- *                           calls happen between _tlbonly() and this.
- *
- *    tlb_flush_mmu() - the above two calls.
+ *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
+ *                      whatever pages are still batched.
*
 *  - mmu_gather::fullmm
*
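
The ordering hazard the removed comment warns about (a page queued between
_tlbonly() and _free() gets freed while stale TLB entries still translate
to it) is easy to model in plain user-space C. The sketch below is
illustrative only; struct gather, flush_tlbonly(), flush_free() and
flush() are made-up stand-ins, not kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct mmu_gather; names are illustrative only. */
struct gather {
	void *pages[8];
	int   nr;		/* pages queued since the last invalidate */
};

/* Models tlb_flush_mmu_tlbonly(): invalidate (and reset range state),
 * leaving the queued pages batched. */
static void flush_tlbonly(struct gather *g)
{
	printf("invalidate covering %d queued page(s)\n", g->nr);
}

/* Models the old tlb_flush_mmu_free(): free the batch. Safe only if
 * nothing was queued after the last flush_tlbonly(); a page queued in
 * that window would be freed while still reachable through the TLB. */
static void flush_free(struct gather *g)
{
	while (g->nr > 0)
		free(g->pages[--g->nr]);
}

/* Models the new tlb_flush_mmu(): the two steps are always paired, so
 * no caller can queue a page into the window between them. */
static void flush(struct gather *g)
{
	flush_tlbonly(g);
	flush_free(g);
}

int main(void)
{
	struct gather g = { .nr = 0 };

	g.pages[g.nr++] = malloc(64);
	g.pages[g.nr++] = malloc(64);
	flush(&g);	/* invalidate first, then free the batch */
	return 0;
}
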
void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
			 unsigned long start, unsigned long end, bool force);
-void tlb_flush_mmu_free(struct mmu_gather *tlb);
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
#endif /* HAVE_MMU_GATHER_NO_GATHER */
-void tlb_flush_mmu_free(struct mmu_gather *tlb)
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
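
With the declaration dropped from the header and the definition made
static, tlb_flush_mmu() is the only entry point left for callers, and it
always pairs the invalidate with the free. For context, the combined
function in mm/mmu_gather.c around this change reduces to:

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}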