#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
#include <asm/idle.h>

#include <mach_ipi.h>
/*
 * Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway).
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * To avoid global state use 8 different call vectors.
 * Each CPU uses a specific vector to trigger flushes on other
 * CPUs. Depending on the received vector the target CPUs look into
 * the right per cpu variable for the flush data.
 *
 * With more than 8 CPUs they are hashed to the 8 available
 * vectors. The limited global vector space forces us to this right now.
 * In future when interrupts are split into per CPU domains this could be
 * fixed, at the cost of triggering multiple IPIs in some cases.
 */

union smp_flush_state {
	struct {
		cpumask_t flush_cpumask;
		struct mm_struct *flush_mm;
		unsigned long flush_va;
		spinlock_t tlbstate_lock;
	};
	char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;

/*
 * State is put into the per CPU data section, but padded
 * to a full cache line because other CPUs can access it and we don't
 * want false sharing in the per cpu data segment.
 */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * it's staying as a return
	 *
	 * BUG();
	 */

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
	add_pda(irq_tlb_count, 1);
}

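/*
 * Request a TLB flush on the CPUs in *cpumaskp, either of a single address
 * (va) or of mm's whole address space (va == TLB_FLUSH_ALL).  The caller
 * must have disabled preemption.  The flush data is published in the
 * flush_state slot this CPU hashes to, an IPI is sent on that slot's
 * vector, and we spin until every target CPU has cleared itself from
 * flush_cpumask.
 */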
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;
	cpumask_t cpumask = *cpumaskp;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}

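/*
 * Initialize the flush_state spinlock of every possible CPU early, before
 * any cross-CPU TLB flushes can be issued.
 */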
static int __cpuinit init_smp_flush(void)
{
	int i;

	for_each_cpu_mask(i, cpu_possible_map) {
		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
	}
	return 0;
}
core_initcall(init_smp_flush);

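/*
 * Flush the current task's entire address space: locally right away, and
 * by IPI on every other CPU that is currently running with this mm.
 */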
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}

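/*
 * Flush all TLB entries for mm.  Locally we either flush the TLB directly
 * or, if mm is only held as a lazy TLB by a kernel thread, detach from it
 * with leave_mm(); the remaining CPUs in mm->cpu_vm_mask are flushed by IPI.
 */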
void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}

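/*
 * Flush the mapping of a single address (va) from vma's address space,
 * locally and on every other CPU that has the owning mm loaded.
 */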
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}

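/*
 * Helper run on each CPU by flush_tlb_all(): flush the entire local TLB
 * and, if this CPU only holds its mm lazily, detach from it with leave_mm().
 */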
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

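/* Flush the TLBs of all online CPUs, including the local one. */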
void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}