/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e., SW loaded TLBs or Book3E compliant
 * processors; this does -not- include 603, however, which shares the
 * implementation with hash based processors)
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "mmu_decl.h"
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

/*
 * These are the base non-SMP variants of page and mm flushing
 * (a usage sketch follows the definitions below)
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       0 /* tsize unused for now */, 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);
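
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * clears a PTE and then drops the single stale translation on the local
 * processor. example_zap_one_page() is hypothetical; only the
 * local_flush_tlb_page() call demonstrates this file's API.
 */
#if 0
static void example_zap_one_page(struct vm_area_struct *vma,
				 unsigned long vmaddr, pte_t *ptep)
{
	/* Clear the PTE first so no new TLB entry can be loaded from it */
	pte_clear(vma->vm_mm, vmaddr, ptep);

	/* Then invalidate whatever this CPU's TLB still caches for it */
	local_flush_tlb_page(vma, vmaddr);
}
#endif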
/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_SPINLOCK(tlbivax_lock);

/* Returns true if this mm has only run on HW threads of the current core */
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_thread_cpumask(smp_processor_id()));
}

struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}
/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 0 /* tsize unused for now */, 0);
}
EXPORT_SYMBOL(flush_tlb_page);
#endif /* CONFIG_SMP */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	/* A NULL param makes the IPI handler flush PID 0, i.e. everything */
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
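
/*
 * Illustrative sketch, not part of the original file: a caller unmaps a
 * kernel virtual region, then flushes the stale translations for it.
 * example_teardown_kernel_mapping() is hypothetical.
 */
#if 0
static void example_teardown_kernel_mapping(unsigned long base,
					    unsigned long size)
{
	/* ... kernel PTEs covering [base, base + size) are cleared here ... */

	/* Invalidate stale kernel translations; note that this
	 * implementation flushes the whole kernel PID (0) regardless,
	 * so the range arguments are effectively advisory for now. */
	flush_tlb_kernel_range(base, base + size);
}
#endif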
/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync, but
 * for now we keep it that way (a hypothetical sketch follows below).
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
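
/*
 * Hypothetical sketch of the threshold optimization described above, not
 * part of the original file: below some cutoff, flush the range page by
 * page instead of nuking the whole mm context. TLB_RANGE_FLUSH_THRESHOLD
 * and its value are assumptions for illustration.
 */
#if 0
#define TLB_RANGE_FLUSH_THRESHOLD	(32 * PAGE_SIZE)

static void example_flush_tlb_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
{
	if (end - start <= TLB_RANGE_FLUSH_THRESHOLD) {
		unsigned long addr;

		/* Small range: per-page invalidations, which some
		 * implementations can stack before a single tlbsync */
		for (addr = start; addr < end; addr += PAGE_SIZE)
			flush_tlb_page(vma, addr);
	} else {
		/* Large range: cheaper to flush the whole context */
		flush_tlb_mm(vma->vm_mm);
	}
}
#endif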
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);

	/* Push out batch of freed page tables */
	pte_free_finish();
}