/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
        spinlock_t lock;
        unsigned long ipi_count;
        unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
        .lock = SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
        IPI_TIMER,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};

int __cpuinit __cpu_up(unsigned int cpu)
{
        struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
        struct task_struct *idle = ci->idle;
        pgd_t *pgd;
        pmd_t *pmd;
        int ret;

        /*
         * Spawn a new process manually, if not already done.
         * Grab a pointer to its task struct so we can mess with it
         */
        if (!idle) {
                idle = fork_idle(cpu);
                if (IS_ERR(idle)) {
                        printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
                        return PTR_ERR(idle);
                }
                ci->idle = idle;
        }

        /*
         * Allocate initial page tables to allow the new CPU to
         * enable the MMU safely.  This essentially means a set
         * of our "standard" page tables, with the addition of
         * a 1:1 mapping for the physical address of the kernel.
         */
        pgd = pgd_alloc(&init_mm);
        pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
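        /*
         * Write the identity (1:1) entry as a section mapping for the
         * kernel's physical base: the secondary CPU is still fetching
         * from physical addresses at the instant it turns the MMU on,
         * so this entry keeps its instruction fetches valid across the
         * switch.  PMD_SECT_AP_WRITE makes the section kernel-writable.
         */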
        *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
                     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
        flush_pmd_entry(pmd);

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        secondary_data.pgdir = virt_to_phys(pgd);
        wmb();

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                unsigned long timeout;

                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                timeout = jiffies + HZ;
                while (time_before(jiffies, timeout)) {
                        if (cpu_online(cpu))
                                break;

                        udelay(10);
                        barrier();
                }

                if (!cpu_online(cpu))
                        ret = -EIO;
        }

        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;

        *pmd = __pmd(0);
        clean_pmd_entry(pmd);
        pgd_free(&init_mm, pgd);

        if (ret) {
                printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

                /*
                 * FIXME: We need to clean up the new idle thread. --rmk
                 */
        }

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuexit __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        struct task_struct *p;
        int ret;

        ret = mach_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Stop the local timer for this CPU.
         */
        local_timer_stop();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
        local_flush_tlb_all();

        read_lock(&tasklist_lock);
        for_each_process(p) {
                if (p->mm)
                        cpu_clear(cpu, p->mm->cpu_vm_mask);
        }
        read_unlock(&tasklist_lock);

        return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuexit __cpu_die(unsigned int cpu)
{
        if (!platform_cpu_kill(cpu))
                printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __cpuexit cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        local_irq_disable();
        idle_task_exit();

        /*
         * actual CPU shutdown procedure is at least platform (if not
         * CPU) specific
         */
        platform_cpu_die(cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
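        /*
         * Reset sp to the top of this idle thread's stack first:
         * THREAD_SIZE - 8 recreates the initial SVC stack pointer
         * (THREAD_START_SP) that the thread was given at boot.
         */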
        __asm__("mov sp, %0\n"
                "b secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        printk("CPU%u: Booted secondary processor\n", cpu);

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_users);
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpu_set(cpu, mm->cpu_vm_mask);
        cpu_switch_mm(mm->pgd, mm);
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        cpu_init();
        preempt_disable();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        platform_secondary_init(cpu);

        /*
         * Enable local interrupts.
         */
        notify_cpu_starting(cpu);
        local_irq_enable();
        local_fiq_enable();

        /*
         * Setup the percpu timer for this CPU.
         */
        percpu_timer_setup();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue
         */
        set_cpu_online(cpu, true);

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

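        /*
         * BogoMIPS = loops_per_jiffy * HZ / 500000, so dividing the
         * sum by (500000/HZ) gives the integer part, and dividing by
         * (5000/HZ) modulo 100 gives the two fractional digits.
         */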
        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        per_cpu(cpu_data, cpu).idle = current;
}

static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
        unsigned long flags;
        unsigned int cpu;

        local_irq_save(flags);

        for_each_cpu(cpu, mask) {
                struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

                spin_lock(&ipi->lock);
                ipi->bits |= 1 << msg;
                spin_unlock(&ipi->lock);
        }

        /*
         * Call the platform specific cross-CPU call function.
         */
        smp_cross_call(mask);

        local_irq_restore(flags);
}

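/*
 * smp_cross_call() itself is provided by the platform.  As a sketch
 * (an assumed implementation, not part of this file), on a GIC-based
 * platform it is typically just a software-generated interrupt aimed
 * at the CPUs in the mask:
 *
 *      static inline void smp_cross_call(const struct cpumask *mask)
 *      {
 *              gic_raise_softirq(mask, 1);
 *      }
 *
 * do_IPI() on each target CPU then drains the message bits set above.
 */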
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void show_ipi_list(struct seq_file *p)
{
        unsigned int cpu;

        seq_puts(p, "IPI:");

        for_each_present_cpu(cpu)
                seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

        seq_putc(p, '\n');
}

void show_local_irqs(struct seq_file *p)
{
        unsigned int cpu;

        seq_printf(p, "LOC: ");

        for_each_present_cpu(cpu)
                seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

        seq_putc(p, '\n');
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
        struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
        irq_enter();
        evt->event_handler(evt);
        irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        int cpu = smp_processor_id();

        if (local_timer_ack()) {
                irq_stat[cpu].local_timer_irqs++;
                ipi_timer();
        }

        set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
        send_ipi_message(mask, IPI_TIMER);
}

static void broadcast_timer_set_mode(enum clock_event_mode mode,
        struct clock_event_device *evt)
{
}

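/*
 * This "timer" never ticks on its own (CLOCK_EVT_FEAT_DUMMY): it is
 * registered so the generic broadcast framework will deliver ticks to
 * this CPU via smp_timer_broadcast(), i.e. an IPI_TIMER message, when
 * no usable per-CPU hardware timer is available.
 */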
static void local_timer_setup(struct clock_event_device *evt)
{
        evt->name = "dummy_timer";
        evt->features = CLOCK_EVT_FEAT_ONESHOT |
                        CLOCK_EVT_FEAT_PERIODIC |
                        CLOCK_EVT_FEAT_DUMMY;
        evt->rating = 400;
        evt->mult = 1;
        evt->set_mode = broadcast_timer_set_mode;
        evt->broadcast = smp_timer_broadcast;

        clockevents_register_device(evt);
}
#endif

void __cpuinit percpu_timer_setup(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        evt->cpumask = cpumask_of(cpu);

        local_timer_setup(evt);
}

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        spin_lock(&stop_lock);
        printk(KERN_CRIT "CPU%u: stopping\n", cpu);
        dump_stack();
        spin_unlock(&stop_lock);

        set_cpu_online(cpu, false);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
        struct pt_regs *old_regs = set_irq_regs(regs);

        ipi->ipi_count++;

        for (;;) {
                unsigned long msgs;

                spin_lock(&ipi->lock);
                msgs = ipi->bits;
                ipi->bits = 0;
                spin_unlock(&ipi->lock);

                if (!msgs)
                        break;

                do {
                        unsigned nextmsg;

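                        /*
                         * msgs & -msgs isolates the lowest set bit;
                         * ffz(~nextmsg) then turns that one-bit mask
                         * into its bit index, i.e. the message number.
                         */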
                        nextmsg = msgs & -msgs;
                        msgs &= ~nextmsg;
                        nextmsg = ffz(~nextmsg);

                        switch (nextmsg) {
                        case IPI_TIMER:
                                ipi_timer();
                                break;

                        case IPI_RESCHEDULE:
                                /*
                                 * nothing more to do - everything is
                                 * done on the interrupt return path
                                 */
                                break;

                        case IPI_CALL_FUNC:
                                generic_smp_call_function_interrupt();
                                break;

                        case IPI_CALL_FUNC_SINGLE:
                                generic_smp_call_function_single_interrupt();
                                break;

                        case IPI_CPU_STOP:
                                ipi_cpu_stop(cpu);
                                break;

                        default:
                                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                                       cpu, nextmsg);
                                break;
                        }
                } while (msgs);
        }

        set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
        cpumask_t mask = cpu_online_map;
        cpu_clear(smp_processor_id(), mask);
        send_ipi_message(&mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
        const struct cpumask *mask)
{
        preempt_disable();

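        /*
         * smp_call_function_many() runs func only on the *other* CPUs
         * in the mask, so when the calling CPU is itself a member we
         * must invoke func() locally as well.
         */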
        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(smp_processor_id(), mask))
                func(info);

        preempt_enable();
}

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
        struct vm_area_struct *ta_vma;
        unsigned long ta_start;
        unsigned long ta_end;
};

/* all SMP configurations have the extended CPUID registers */
static inline int tlb_ops_need_broadcast(void)
{
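        /*
         * ID_MMFR3 bits [15:12] report the "maintenance broadcast"
         * level: a value of 2 or more means TLB (and cache)
         * maintenance operations are broadcast in hardware, so no
         * software IPI is required.
         */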
        return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
}

static inline void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
        struct mm_struct *mm = (struct mm_struct *)arg;

        local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1);
        else
                local_flush_tlb_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
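        /*
         * Only CPUs that have ever run this mm (mm->cpu_vm_mask) can
         * hold stale TLB entries for it, so the IPI is limited to
         * that set rather than broadcast to every online CPU.
         */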
        if (tlb_ops_need_broadcast())
                on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
        else
                local_flush_tlb_mm(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = uaddr;
                on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
        } else
                local_flush_tlb_page(vma, uaddr);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = kaddr;
                on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
        } else
                local_flush_tlb_kernel_page(kaddr);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
        } else
                local_flush_tlb_range(vma, start, end);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
        } else
                local_flush_tlb_kernel_range(start, end);
}