Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/alpha/kernel/smp.c | |
3 | * | |
4 | * 2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com) | |
5 | * Renamed the modified smp_call_function to smp_call_function_on_cpu() | |
6 | * Created a function that conforms to the old calling convention | |
7 | * of smp_call_function(). | |
8 | * | |
9 | * This is helpful for DCPI. | |
10 | * | |
11 | */ | |
12 | ||
13 | #include <linux/errno.h> | |
14 | #include <linux/kernel.h> | |
15 | #include <linux/kernel_stat.h> | |
16 | #include <linux/module.h> | |
17 | #include <linux/sched.h> | |
18 | #include <linux/mm.h> | |
19 | #include <linux/threads.h> | |
20 | #include <linux/smp.h> | |
1da177e4 LT |
21 | #include <linux/interrupt.h> |
22 | #include <linux/init.h> | |
23 | #include <linux/delay.h> | |
24 | #include <linux/spinlock.h> | |
25 | #include <linux/irq.h> | |
26 | #include <linux/cache.h> | |
27 | #include <linux/profile.h> | |
28 | #include <linux/bitops.h> | |
29 | ||
30 | #include <asm/hwrpb.h> | |
31 | #include <asm/ptrace.h> | |
32 | #include <asm/atomic.h> | |
33 | ||
34 | #include <asm/io.h> | |
35 | #include <asm/irq.h> | |
36 | #include <asm/pgtable.h> | |
37 | #include <asm/pgalloc.h> | |
38 | #include <asm/mmu_context.h> | |
39 | #include <asm/tlbflush.h> | |
40 | ||
41 | #include "proto.h" | |
42 | #include "irq_impl.h" | |
43 | ||
44 | ||
45 | #define DEBUG_SMP 0 | |
46 | #if DEBUG_SMP | |
47 | #define DBGS(args) printk args | |
48 | #else | |
49 | #define DBGS(args) | |
50 | #endif | |
51 | ||
52 | /* A collection of per-processor data. */ | |
53 | struct cpuinfo_alpha cpu_data[NR_CPUS]; | |
cff52daf | 54 | EXPORT_SYMBOL(cpu_data); |
1da177e4 LT |
55 | |
56 | /* A collection of single bit ipi messages. */ | |
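/* Each CPU's word lives on its own cache line: senders set bits in another
   CPU's word with atomic ops, and separate lines keep those updates from
   bouncing a shared cache line between processors. */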
57 | static struct { | |
58 | unsigned long bits ____cacheline_aligned; | |
59 | } ipi_data[NR_CPUS] __cacheline_aligned; | |
60 | ||
61 | enum ipi_message_type { | |
62 | IPI_RESCHEDULE, | |
63 | IPI_CALL_FUNC, | |
64 | IPI_CPU_STOP, | |
65 | }; | |
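/* Each value above is a bit index into ipi_data[cpu].bits: send_ipi_message()
   sets the bit and raises an IPI, handle_ipi() clears and dispatches it. */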
66 | ||
67 | /* Set to a secondary's cpuid when it comes online. */ | |
cc040a8a | 68 | static int smp_secondary_alive __devinitdata = 0; |
1da177e4 LT |
69 | |
70 | /* Which cpu ids came online. */ |
1da177e4 LT |
71 | cpumask_t cpu_online_map; |
72 | ||
73 | EXPORT_SYMBOL(cpu_online_map); | |
74 | ||
1da177e4 LT |
75 | int smp_num_probed; /* Internal processor count */ |
76 | int smp_num_cpus = 1; /* Number that came online. */ | |
cff52daf | 77 | EXPORT_SYMBOL(smp_num_cpus); |
1da177e4 LT |
78 | |
79 | extern void calibrate_delay(void); | |
80 | ||
81 | \f | |
82 | ||
83 | /* | |
84 | * Called by both boot and secondaries to move global data into | |
85 | * per-processor storage. | |
86 | */ | |
87 | static inline void __init | |
88 | smp_store_cpu_info(int cpuid) | |
89 | { | |
90 | cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy; | |
91 | cpu_data[cpuid].last_asn = ASN_FIRST_VERSION; | |
92 | cpu_data[cpuid].need_new_asn = 0; | |
93 | cpu_data[cpuid].asn_lock = 0; | |
94 | } | |
95 | ||
96 | /* | |
97 | * Ideally sets up per-cpu profiling hooks. Doesn't do much now... | |
98 | */ | |
99 | static inline void __init | |
100 | smp_setup_percpu_timer(int cpuid) | |
101 | { | |
102 | cpu_data[cpuid].prof_counter = 1; | |
103 | cpu_data[cpuid].prof_multiplier = 1; | |
104 | } | |
105 | ||
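/* smp_secondary_alive is a three-way handshake word: the boot CPU writes -1
   before starting a secondary and 0 once the console has acknowledged it;
   the secondary spins below until it sees 0, runs calibrate_delay(), and
   then writes 1 to report that it is alive. */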
106 | static void __init | |
107 | wait_boot_cpu_to_stop(int cpuid) | |
108 | { | |
109 | unsigned long stop = jiffies + 10*HZ; | |
110 | ||
111 | while (time_before(jiffies, stop)) { | |
112 | if (!smp_secondary_alive) | |
113 | return; | |
114 | barrier(); | |
115 | } | |
116 | ||
117 | printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid); | |
118 | for (;;) | |
119 | barrier(); | |
120 | } | |
121 | ||
122 | /* | |
123 | * Where secondaries begin a life of C. | |
124 | */ | |
125 | void __init | |
126 | smp_callin(void) | |
127 | { | |
128 | int cpuid = hard_smp_processor_id(); | |
129 | ||
130 | if (cpu_test_and_set(cpuid, cpu_online_map)) { | |
131 | printk("??, cpu 0x%x already present??\n", cpuid); | |
132 | BUG(); | |
133 | } | |
134 | ||
135 | /* Turn on machine checks. */ | |
136 | wrmces(7); | |
137 | ||
138 | /* Set trap vectors. */ | |
139 | trap_init(); | |
140 | ||
141 | /* Set interrupt vector. */ | |
142 | wrent(entInt, 0); | |
143 | ||
144 | /* Get our local ticker going. */ | |
145 | smp_setup_percpu_timer(cpuid); | |
146 | ||
147 | /* Call platform-specific callin, if specified */ | |
148 | if (alpha_mv.smp_callin) alpha_mv.smp_callin(); | |
149 | ||
150 | /* All kernel threads share the same mm context. */ | |
151 | atomic_inc(&init_mm.mm_count); | |
152 | current->active_mm = &init_mm; | |
153 | ||
154 | /* Must have completely accurate bogos. */ | |
155 | local_irq_enable(); | |
156 | ||
157 | /* Wait for the boot CPU to stop, with irqs enabled, before |
158 | running calibrate_delay. */ |
159 | wait_boot_cpu_to_stop(cpuid); | |
160 | mb(); | |
161 | calibrate_delay(); | |
162 | ||
163 | smp_store_cpu_info(cpuid); | |
164 | /* Allow the master to continue only after we've written loops_per_jiffy. */ |
165 | wmb(); | |
166 | smp_secondary_alive = 1; | |
167 | ||
168 | DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n", | |
169 | cpuid, current, current->active_mm)); | |
170 | ||
171 | /* Do nothing. */ | |
172 | cpu_idle(); | |
173 | } | |
174 | ||
175 | /* Wait until hwrpb->txrdy is clear for the given cpu. Return -1 on timeout. */ |
cc040a8a | 176 | static int __devinit |
1da177e4 LT |
177 | wait_for_txrdy (unsigned long cpumask) |
178 | { | |
179 | unsigned long timeout; | |
180 | ||
181 | if (!(hwrpb->txrdy & cpumask)) | |
182 | return 0; | |
183 | ||
184 | timeout = jiffies + 10*HZ; | |
185 | while (time_before(jiffies, timeout)) { | |
186 | if (!(hwrpb->txrdy & cpumask)) | |
187 | return 0; | |
188 | udelay(10); | |
189 | barrier(); | |
190 | } | |
191 | ||
192 | return -1; | |
193 | } | |
194 | ||
195 | /* | |
196 | * Send a message to a secondary's console. "START" is one such | |
197 | * interesting message. ;-) | |
198 | */ | |
199 | static void __init | |
200 | send_secondary_console_msg(char *str, int cpuid) | |
201 | { | |
202 | struct percpu_struct *cpu; | |
203 | register char *cp1, *cp2; | |
204 | unsigned long cpumask; | |
205 | size_t len; | |
206 | ||
207 | cpu = (struct percpu_struct *) | |
208 | ((char*)hwrpb | |
209 | + hwrpb->processor_offset | |
210 | + cpuid * hwrpb->processor_size); | |
211 | ||
212 | cpumask = (1UL << cpuid); | |
213 | if (wait_for_txrdy(cpumask)) | |
214 | goto timeout; | |
215 | ||
216 | cp2 = str; | |
217 | len = strlen(cp2); | |
218 | *(unsigned int *)&cpu->ipc_buffer[0] = len; | |
219 | cp1 = (char *) &cpu->ipc_buffer[1]; | |
220 | memcpy(cp1, cp2, len); | |
221 | ||
222 | /* atomic test and set */ | |
223 | wmb(); | |
224 | set_bit(cpuid, &hwrpb->rxrdy); | |
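	/* The rxrdy bit tells the target CPU's console that a message for it
	   is now waiting in its ipc_buffer. */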
225 | ||
226 | if (wait_for_txrdy(cpumask)) | |
227 | goto timeout; | |
228 | return; | |
229 | ||
230 | timeout: | |
231 | printk("Processor %x not ready\n", cpuid); | |
232 | } | |
233 | ||
234 | /* | |
235 | * A secondary console wants to send a message. Receive it. | |
236 | */ | |
237 | static void | |
238 | recv_secondary_console_msg(void) | |
239 | { | |
240 | int mycpu, i, cnt; | |
241 | unsigned long txrdy = hwrpb->txrdy; | |
242 | char *cp1, *cp2, buf[80]; | |
243 | struct percpu_struct *cpu; | |
244 | ||
245 | DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy)); | |
246 | ||
247 | mycpu = hard_smp_processor_id(); | |
248 | ||
249 | for (i = 0; i < NR_CPUS; i++) { | |
250 | if (!(txrdy & (1UL << i))) | |
251 | continue; | |
252 | ||
253 | DBGS(("recv_secondary_console_msg: " | |
254 | "TXRDY contains CPU %d.\n", i)); | |
255 | ||
256 | cpu = (struct percpu_struct *) | |
257 | ((char*)hwrpb | |
258 | + hwrpb->processor_offset | |
259 | + i * hwrpb->processor_size); | |
260 | ||
261 | DBGS(("recv_secondary_console_msg: on %d from %d" | |
262 | " HALT_REASON 0x%lx FLAGS 0x%lx\n", | |
263 | mycpu, i, cpu->halt_reason, cpu->flags)); | |
264 | ||
265 | cnt = cpu->ipc_buffer[0] >> 32; | |
266 | if (cnt <= 0 || cnt >= 80) | |
267 | strcpy(buf, "<<< BOGUS MSG >>>"); | |
268 | else { | |
269 | cp1 = (char *) &cpu->ipc_buffer[11]; | |
270 | cp2 = buf; | |
271 | strcpy(cp2, cp1); | |
272 | ||
273 | while ((cp2 = strchr(cp2, '\r')) != 0) { | |
274 | *cp2 = ' '; | |
275 | if (cp2[1] == '\n') | |
276 | cp2[1] = ' '; | |
277 | } | |
278 | } | |
279 | ||
280 | DBGS((KERN_INFO "recv_secondary_console_msg: on %d " | |
281 | "message is '%s'\n", mycpu, buf)); | |
282 | } | |
283 | ||
284 | hwrpb->txrdy = 0; | |
285 | } | |
286 | ||
287 | /* | |
288 | * Convince the console to have a secondary cpu begin execution. | |
289 | */ | |
290 | static int __init | |
291 | secondary_cpu_start(int cpuid, struct task_struct *idle) | |
292 | { | |
293 | struct percpu_struct *cpu; | |
294 | struct pcb_struct *hwpcb, *ipcb; | |
295 | unsigned long timeout; | |
296 | ||
297 | cpu = (struct percpu_struct *) | |
298 | ((char*)hwrpb | |
299 | + hwrpb->processor_offset | |
300 | + cpuid * hwrpb->processor_size); | |
301 | hwpcb = (struct pcb_struct *) cpu->hwpcb; | |
37bfbaf9 | 302 | ipcb = &task_thread_info(idle)->pcb; |
1da177e4 LT |
303 | |
304 | /* Initialize the CPU's HWPCB to something just good enough for | |
305 | us to get started. Immediately after starting, we'll swpctx | |
306 | to the target idle task's pcb. Reuse the stack in the mean | |
307 | time. Precalculate the target PCBB. */ | |
308 | hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16; | |
309 | hwpcb->usp = 0; | |
310 | hwpcb->ptbr = ipcb->ptbr; | |
311 | hwpcb->pcc = 0; | |
312 | hwpcb->asn = 0; | |
313 | hwpcb->unique = virt_to_phys(ipcb); | |
314 | hwpcb->flags = ipcb->flags; | |
315 | hwpcb->res1 = hwpcb->res2 = 0; | |
316 | ||
317 | #if 0 | |
318 | DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n", | |
319 | hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique)); | |
320 | #endif | |
321 | DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n", | |
322 | cpuid, idle->state, ipcb->flags)); | |
323 | ||
324 | /* Set up the HWRPB fields that SRM uses to activate the secondary CPU. */ |
325 | hwrpb->CPU_restart = __smp_callin; | |
326 | hwrpb->CPU_restart_data = (unsigned long) __smp_callin; | |
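	/* When the console releases the secondary, it begins execution at
	   CPU_restart, i.e. __smp_callin, which eventually ends up in
	   smp_callin() above. */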
327 | ||
328 | /* Recalculate and update the HWRPB checksum */ | |
329 | hwrpb_update_checksum(hwrpb); | |
330 | ||
331 | /* | |
332 | * Send a "start" command to the specified processor. | |
333 | */ | |
334 | ||
335 | /* SRM III 3.4.1.3 */ | |
336 | cpu->flags |= 0x22; /* turn on Context Valid and Restart Capable */ | |
337 | cpu->flags &= ~1; /* turn off Bootstrap In Progress */ | |
338 | wmb(); | |
339 | ||
340 | send_secondary_console_msg("START\r\n", cpuid); | |
341 | ||
342 | /* Wait 10 seconds for an ACK from the console. */ | |
343 | timeout = jiffies + 10*HZ; | |
344 | while (time_before(jiffies, timeout)) { | |
345 | if (cpu->flags & 1) | |
346 | goto started; | |
347 | udelay(10); | |
348 | barrier(); | |
349 | } | |
350 | printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid); | |
351 | return -1; | |
352 | ||
353 | started: | |
354 | DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid)); | |
355 | return 0; | |
356 | } | |
357 | ||
358 | /* | |
359 | * Bring one cpu online. | |
360 | */ | |
cc040a8a | 361 | static int __devinit |
1da177e4 LT |
362 | smp_boot_one_cpu(int cpuid) |
363 | { | |
364 | struct task_struct *idle; | |
365 | unsigned long timeout; | |
366 | ||
367 | /* Cook up an idler for this guy. Note that the address we | |
368 | give to kernel_thread is irrelevant -- it's going to start | |
369 | where HWRPB.CPU_restart says to start. But this gets all | |
370 | the other task-y sort of data structures set up like we | |
371 | wish. We can't use kernel_thread since we must avoid | |
372 | rescheduling the child. */ | |
373 | idle = fork_idle(cpuid); | |
374 | if (IS_ERR(idle)) | |
375 | panic("failed fork for CPU %d", cpuid); | |
376 | ||
377 | DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n", | |
378 | cpuid, idle->state, idle->flags)); | |
379 | ||
380 | /* Signal the secondary to wait a moment. */ | |
381 | smp_secondary_alive = -1; | |
382 | ||
383 | /* Whirrr, whirrr, whirrrrrrrrr... */ | |
384 | if (secondary_cpu_start(cpuid, idle)) | |
385 | return -1; | |
386 | ||
387 | /* Notify the secondary CPU it can run calibrate_delay. */ | |
388 | mb(); | |
389 | smp_secondary_alive = 0; | |
390 | ||
391 | /* We've been acked by the console; wait one second for | |
392 | the task to start up for real. */ | |
393 | timeout = jiffies + 1*HZ; | |
394 | while (time_before(jiffies, timeout)) { | |
395 | if (smp_secondary_alive == 1) | |
396 | goto alive; | |
397 | udelay(10); | |
398 | barrier(); | |
399 | } | |
400 | ||
401 | /* We failed to boot the CPU. */ | |
402 | ||
403 | printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid); | |
404 | return -1; | |
405 | ||
406 | alive: | |
407 | /* Another "Red Snapper". */ | |
408 | return 0; | |
409 | } | |
410 | ||
411 | /* | |
412 | * Called from setup_arch. Detect an SMP system and which processors | |
413 | * are present. | |
414 | */ | |
415 | void __init | |
416 | setup_smp(void) | |
417 | { | |
418 | struct percpu_struct *cpubase, *cpu; | |
419 | unsigned long i; | |
420 | ||
421 | if (boot_cpuid != 0) { | |
422 | printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n", | |
423 | boot_cpuid); | |
424 | } | |
425 | ||
426 | if (hwrpb->nr_processors > 1) { | |
427 | int boot_cpu_palrev; | |
428 | ||
429 | DBGS(("setup_smp: nr_processors %ld\n", | |
430 | hwrpb->nr_processors)); | |
431 | ||
432 | cpubase = (struct percpu_struct *) | |
433 | ((char*)hwrpb + hwrpb->processor_offset); | |
434 | boot_cpu_palrev = cpubase->pal_revision; | |
435 | ||
436 | for (i = 0; i < hwrpb->nr_processors; i++) { | |
437 | cpu = (struct percpu_struct *) | |
438 | ((char *)cpubase + i*hwrpb->processor_size); | |
439 | if ((cpu->flags & 0x1cc) == 0x1cc) { | |
440 | smp_num_probed++; | |
441 | /* Assume here that "whami" == index */ | |
c7d2d28b | 442 | cpu_set(i, cpu_present_map); |
1da177e4 LT |
443 | cpu->pal_revision = boot_cpu_palrev; |
444 | } | |
445 | ||
446 | DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n", | |
447 | i, cpu->flags, cpu->type)); | |
448 | DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n", | |
449 | i, cpu->pal_revision)); | |
450 | } | |
451 | } else { | |
452 | smp_num_probed = 1; | |
1da177e4 | 453 | } |
1da177e4 | 454 | |
c7d2d28b IK |
455 | printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n", |
456 | smp_num_probed, cpu_present_map.bits[0]); | |
1da177e4 LT |
457 | } |
458 | ||
459 | /* | |
460 | * Called by smp_init to prepare the secondaries. | |
461 | */ | |
462 | void __init | |
463 | smp_prepare_cpus(unsigned int max_cpus) | |
464 | { | |
1da177e4 LT |
465 | /* Take care of some initial bookkeeping. */ |
466 | memset(ipi_data, 0, sizeof(ipi_data)); | |
467 | ||
468 | current_thread_info()->cpu = boot_cpuid; | |
469 | ||
470 | smp_store_cpu_info(boot_cpuid); | |
471 | smp_setup_percpu_timer(boot_cpuid); | |
472 | ||
473 | /* Nothing to do on a UP box, or when told not to. */ | |
474 | if (smp_num_probed == 1 || max_cpus == 0) { | |
c7d2d28b | 475 | cpu_present_map = cpumask_of_cpu(boot_cpuid); |
1da177e4 LT |
476 | printk(KERN_INFO "SMP mode deactivated.\n"); |
477 | return; | |
478 | } | |
479 | ||
480 | printk(KERN_INFO "SMP starting up secondaries.\n"); | |
481 | ||
328c2a8a | 482 | smp_num_cpus = smp_num_probed; |
1da177e4 LT |
483 | } |
484 | ||
485 | void __devinit | |
486 | smp_prepare_boot_cpu(void) | |
487 | { | |
1da177e4 LT |
488 | } |
489 | ||
490 | int __devinit | |
491 | __cpu_up(unsigned int cpu) | |
492 | { | |
493 | smp_boot_one_cpu(cpu); | |
494 | ||
495 | return cpu_online(cpu) ? 0 : -ENOSYS; | |
496 | } | |
497 | ||
498 | void __init | |
499 | smp_cpus_done(unsigned int max_cpus) | |
500 | { | |
501 | int cpu; | |
502 | unsigned long bogosum = 0; | |
503 | ||
504 | for(cpu = 0; cpu < NR_CPUS; cpu++) | |
505 | if (cpu_online(cpu)) | |
506 | bogosum += cpu_data[cpu].loops_per_jiffy; | |
507 | ||
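	/* BogoMIPS = loops_per_jiffy * HZ / 500000; the first expression below
	   is the integer part and the second the two-digit fraction for %02lu. */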
508 | printk(KERN_INFO "SMP: Total of %d processors activated " | |
509 | "(%lu.%02lu BogoMIPS).\n", | |
510 | num_online_cpus(), | |
511 | (bogosum + 2500) / (500000/HZ), | |
512 | ((bogosum + 2500) / (5000/HZ)) % 100); | |
513 | } | |
514 | ||
515 | \f | |
516 | void | |
517 | smp_percpu_timer_interrupt(struct pt_regs *regs) | |
518 | { | |
8774cb81 | 519 | struct pt_regs *old_regs; |
1da177e4 LT |
520 | int cpu = smp_processor_id(); |
521 | unsigned long user = user_mode(regs); | |
522 | struct cpuinfo_alpha *data = &cpu_data[cpu]; | |
523 | ||
8774cb81 AV |
524 | old_regs = set_irq_regs(regs); |
525 | ||
1da177e4 | 526 | /* Record kernel PC. */ |
8774cb81 | 527 | profile_tick(CPU_PROFILING); |
1da177e4 LT |
528 | |
529 | if (!--data->prof_counter) { | |
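		/* prof_counter counts ticks down from prof_multiplier, so the
		   process-time accounting below runs once per full period. */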
530 | /* We need to make like a normal interrupt -- otherwise | |
531 | timer interrupts ignore the global interrupt lock, | |
532 | which would be a Bad Thing. */ | |
533 | irq_enter(); | |
534 | ||
535 | update_process_times(user); | |
536 | ||
537 | data->prof_counter = data->prof_multiplier; | |
538 | ||
539 | irq_exit(); | |
540 | } | |
8774cb81 | 541 | set_irq_regs(old_regs); |
1da177e4 LT |
542 | } |
543 | ||
544 | int __init | |
545 | setup_profiling_timer(unsigned int multiplier) | |
546 | { | |
547 | return -EINVAL; | |
548 | } | |
549 | ||
550 | \f | |
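/* Post <operation> as a bit in each target CPU's ipi_data word, then use
   wripir() to raise an interprocessor interrupt on each of them.  The
   memory barriers order the bit updates with respect to the IPIs. */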
551 | static void | |
552 | send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation) | |
553 | { | |
554 | int i; | |
555 | ||
556 | mb(); | |
557 | for_each_cpu_mask(i, to_whom) | |
558 | set_bit(operation, &ipi_data[i].bits); | |
559 | ||
560 | mb(); | |
561 | for_each_cpu_mask(i, to_whom) | |
562 | wripir(i); | |
563 | } | |
564 | ||
565 | /* Structure and data for smp_call_function. This is designed to | |
566 | minimize static memory requirements. Plus it looks cleaner. */ | |
567 | ||
568 | struct smp_call_struct { | |
569 | void (*func) (void *info); | |
570 | void *info; | |
571 | long wait; | |
572 | atomic_t unstarted_count; | |
573 | atomic_t unfinished_count; | |
574 | }; | |
575 | ||
576 | static struct smp_call_struct *smp_call_function_data; | |
577 | ||
578 | /* Atomically drop data into a shared pointer. The pointer is free if |
579 | it is initially locked. If retry, spin until free. */ | |
580 | ||
581 | static int | |
582 | pointer_lock (void *lock, void *data, int retry) | |
583 | { | |
584 | void *old, *tmp; | |
585 | ||
586 | mb(); | |
587 | again: | |
588 | /* Compare and swap with zero. */ | |
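	/* ldq_l/stq_c retry loop: if the lock word is currently zero, atomically
	   store <data> into it; otherwise fall through with the old, non-zero
	   value in %0 so the caller can decide whether to spin. */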
589 | asm volatile ( | |
590 | "1: ldq_l %0,%1\n" | |
591 | " mov %3,%2\n" | |
592 | " bne %0,2f\n" | |
593 | " stq_c %2,%1\n" | |
594 | " beq %2,1b\n" | |
595 | "2:" | |
596 | : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp) | |
597 | : "r"(data) | |
598 | : "memory"); | |
599 | ||
600 | if (old == 0) | |
601 | return 0; | |
602 | if (! retry) | |
603 | return -EBUSY; | |
604 | ||
605 | while (*(void **)lock) | |
606 | barrier(); | |
607 | goto again; | |
608 | } | |
609 | ||
610 | void | |
611 | handle_ipi(struct pt_regs *regs) | |
612 | { | |
613 | int this_cpu = smp_processor_id(); | |
614 | unsigned long *pending_ipis = &ipi_data[this_cpu].bits; | |
615 | unsigned long ops; | |
616 | ||
617 | #if 0 | |
618 | DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n", | |
619 | this_cpu, *pending_ipis, regs->pc)); | |
620 | #endif | |
621 | ||
622 | mb(); /* Order interrupt and bit testing. */ | |
623 | while ((ops = xchg(pending_ipis, 0)) != 0) { | |
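		/* xchg atomically grabs and clears the whole pending word, so bits
		   set by other CPUs while we work are picked up on the next pass. */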
624 | mb(); /* Order bit clearing and data access. */ | |
625 | do { | |
626 | unsigned long which; | |
627 | ||
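			/* Isolate the lowest pending bit, clear it from ops, and convert
			   it to its bit index -- the ipi_message_type. */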
628 | which = ops & -ops; | |
629 | ops &= ~which; | |
630 | which = __ffs(which); | |
631 | ||
632 | switch (which) { | |
633 | case IPI_RESCHEDULE: | |
634 | /* Reschedule callback. Everything to be done | |
635 | is done by the interrupt return path. */ | |
636 | break; | |
637 | ||
638 | case IPI_CALL_FUNC: | |
639 | { | |
640 | struct smp_call_struct *data; | |
641 | void (*func)(void *info); | |
642 | void *info; | |
643 | int wait; | |
644 | ||
645 | data = smp_call_function_data; | |
646 | func = data->func; | |
647 | info = data->info; | |
648 | wait = data->wait; | |
649 | ||
650 | /* Notify the sending CPU that the data has been | |
651 | received, and execution is about to begin. */ | |
652 | mb(); | |
653 | atomic_dec (&data->unstarted_count); | |
654 | ||
655 | /* At this point the structure may be gone unless | |
656 | wait is true. */ | |
657 | (*func)(info); | |
658 | ||
659 | /* Notify the sending CPU that the task is done. */ | |
660 | mb(); | |
661 | if (wait) atomic_dec (&data->unfinished_count); | |
662 | break; | |
663 | } | |
664 | ||
665 | case IPI_CPU_STOP: | |
666 | halt(); | |
667 | ||
668 | default: | |
669 | printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", | |
670 | this_cpu, which); | |
671 | break; | |
672 | } | |
673 | } while (ops); | |
674 | ||
675 | mb(); /* Order data access and bit testing. */ | |
676 | } | |
677 | ||
678 | cpu_data[this_cpu].ipi_count++; | |
679 | ||
680 | if (hwrpb->txrdy) | |
681 | recv_secondary_console_msg(); | |
682 | } | |
683 | ||
684 | void | |
685 | smp_send_reschedule(int cpu) | |
686 | { | |
687 | #ifdef DEBUG_IPI_MSG | |
688 | if (cpu == hard_smp_processor_id()) | |
689 | printk(KERN_WARNING | |
690 | "smp_send_reschedule: Sending IPI to self.\n"); | |
691 | #endif | |
692 | send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE); | |
693 | } | |
694 | ||
695 | void | |
696 | smp_send_stop(void) | |
697 | { | |
698 | cpumask_t to_whom = cpu_possible_map; | |
699 | cpu_clear(smp_processor_id(), to_whom); | |
700 | #ifdef DEBUG_IPI_MSG | |
701 | if (hard_smp_processor_id() != boot_cpu_id) | |
702 | printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n"); | |
703 | #endif | |
704 | send_ipi_message(to_whom, IPI_CPU_STOP); | |
705 | } | |
706 | ||
707 | /* | |
708 | * Run a function on all other CPUs. | |
709 | * <func> The function to run. This must be fast and non-blocking. | |
710 | * <info> An arbitrary pointer to pass to the function. | |
711 | * <retry> If true, keep retrying until ready. | |
712 | * <wait> If true, wait until function has completed on other CPUs. | |
713 | * [RETURNS] 0 on success, else a negative status code. | |
714 | * | |
715 | * Does not return until remote CPUs are nearly ready to execute <func> | |
716 | * or have already executed it. |
717 | * You must not call this function with disabled interrupts or from a | |
718 | * hardware interrupt handler or from a bottom half handler. | |
719 | */ | |
720 | ||
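/* Illustrative use (hypothetical handler name): a fast, non-blocking
 * function plus an optional info cookie, e.g.
 *
 *	static void drain_local_counters(void *info) { ... }
 *	...
 *	smp_call_function(drain_local_counters, NULL, 1, 1);
 */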
721 | int | |
722 | smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry, | |
723 | int wait, cpumask_t to_whom) | |
724 | { | |
725 | struct smp_call_struct data; | |
726 | unsigned long timeout; | |
727 | int num_cpus_to_call; | |
728 | ||
729 | /* Can deadlock when called with interrupts disabled */ | |
730 | WARN_ON(irqs_disabled()); | |
731 | ||
732 | data.func = func; | |
733 | data.info = info; | |
734 | data.wait = wait; | |
735 | ||
736 | cpu_clear(smp_processor_id(), to_whom); | |
737 | num_cpus_to_call = cpus_weight(to_whom); | |
738 | ||
739 | atomic_set(&data.unstarted_count, num_cpus_to_call); | |
740 | atomic_set(&data.unfinished_count, num_cpus_to_call); | |
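	/* unstarted_count tracks CPUs that have not yet picked up the call data;
	   unfinished_count tracks CPUs still inside func() and matters only
	   when wait is set. */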
741 | ||
742 | /* Acquire the smp_call_function_data mutex. */ | |
743 | if (pointer_lock(&smp_call_function_data, &data, retry)) | |
744 | return -EBUSY; | |
745 | ||
746 | /* Send a message to the requested CPUs. */ | |
747 | send_ipi_message(to_whom, IPI_CALL_FUNC); | |
748 | ||
749 | /* Wait for a minimal response. */ | |
750 | timeout = jiffies + HZ; | |
751 | while (atomic_read (&data.unstarted_count) > 0 | |
752 | && time_before (jiffies, timeout)) | |
753 | barrier(); | |
754 | ||
755 | /* If there's no response yet, log a message but allow a longer | |
756 | * timeout period -- if we get a response this time, log | |
757 | * a message saying when we got it. |
758 | */ | |
759 | if (atomic_read(&data.unstarted_count) > 0) { | |
760 | long start_time = jiffies; | |
761 | printk(KERN_ERR "%s: initial timeout -- trying long wait\n", | |
762 | __FUNCTION__); | |
763 | timeout = jiffies + 30 * HZ; | |
764 | while (atomic_read(&data.unstarted_count) > 0 | |
765 | && time_before(jiffies, timeout)) | |
766 | barrier(); | |
767 | if (atomic_read(&data.unstarted_count) <= 0) { | |
768 | long delta = jiffies - start_time; | |
769 | printk(KERN_ERR | |
770 | "%s: response %ld.%ld seconds into long wait\n", | |
771 | __FUNCTION__, delta / HZ, | |
772 | (100 * (delta - ((delta / HZ) * HZ))) / HZ); | |
773 | } | |
774 | } | |
775 | ||
776 | /* We either got one or timed out -- clear the lock. */ | |
777 | mb(); | |
778 | smp_call_function_data = NULL; | |
779 | ||
780 | /* | |
781 | * If after both the initial and long timeout periods we still don't | |
782 | * have a response, something is very wrong... | |
783 | */ | |
784 | BUG_ON(atomic_read (&data.unstarted_count) > 0); | |
785 | ||
786 | /* Wait for a complete response, if needed. */ | |
787 | if (wait) { | |
788 | while (atomic_read (&data.unfinished_count) > 0) | |
789 | barrier(); | |
790 | } | |
791 | ||
792 | return 0; | |
793 | } | |
cff52daf | 794 | EXPORT_SYMBOL(smp_call_function_on_cpu); |
1da177e4 LT |
795 | |
796 | int | |
797 | smp_call_function (void (*func) (void *info), void *info, int retry, int wait) | |
798 | { | |
799 | return smp_call_function_on_cpu (func, info, retry, wait, | |
800 | cpu_online_map); | |
801 | } | |
cff52daf | 802 | EXPORT_SYMBOL(smp_call_function); |
1da177e4 LT |
803 | |
804 | static void | |
805 | ipi_imb(void *ignored) | |
806 | { | |
807 | imb(); | |
808 | } | |
809 | ||
810 | void | |
811 | smp_imb(void) | |
812 | { | |
813 | /* Must wait for other processors to flush their icache before continuing. */ |
814 | if (on_each_cpu(ipi_imb, NULL, 1, 1)) | |
815 | printk(KERN_CRIT "smp_imb: timed out\n"); | |
816 | } | |
cff52daf | 817 | EXPORT_SYMBOL(smp_imb); |
1da177e4 LT |
818 | |
819 | static void | |
820 | ipi_flush_tlb_all(void *ignored) | |
821 | { | |
822 | tbia(); | |
823 | } | |
824 | ||
825 | void | |
826 | flush_tlb_all(void) | |
827 | { | |
828 | /* Although we don't have any data to pass, we do want to | |
829 | synchronize with the other processors. */ | |
830 | if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) { | |
831 | printk(KERN_CRIT "flush_tlb_all: timed out\n"); | |
832 | } | |
833 | } | |
834 | ||
835 | #define asn_locked() (cpu_data[smp_processor_id()].asn_lock) | |
836 | ||
837 | static void | |
838 | ipi_flush_tlb_mm(void *x) | |
839 | { | |
840 | struct mm_struct *mm = (struct mm_struct *) x; | |
841 | if (mm == current->active_mm && !asn_locked()) | |
842 | flush_tlb_current(mm); | |
843 | else | |
844 | flush_tlb_other(mm); | |
845 | } | |
846 | ||
847 | void | |
848 | flush_tlb_mm(struct mm_struct *mm) | |
849 | { | |
850 | preempt_disable(); | |
851 | ||
852 | if (mm == current->active_mm) { | |
853 | flush_tlb_current(mm); | |
854 | if (atomic_read(&mm->mm_users) <= 1) { | |
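			/* Single-user mm: no need to interrupt anyone.  Zeroing the
			   per-CPU context forces a fresh ASN (and thus a TLB fill) if
			   another CPU ever switches to this mm later. */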
855 | int cpu, this_cpu = smp_processor_id(); | |
856 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | |
857 | if (!cpu_online(cpu) || cpu == this_cpu) | |
858 | continue; | |
859 | if (mm->context[cpu]) | |
860 | mm->context[cpu] = 0; | |
861 | } | |
862 | preempt_enable(); | |
863 | return; | |
864 | } | |
865 | } | |
866 | ||
867 | if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) { | |
868 | printk(KERN_CRIT "flush_tlb_mm: timed out\n"); | |
869 | } | |
870 | ||
871 | preempt_enable(); | |
872 | } | |
cff52daf | 873 | EXPORT_SYMBOL(flush_tlb_mm); |
1da177e4 LT |
874 | |
875 | struct flush_tlb_page_struct { | |
876 | struct vm_area_struct *vma; | |
877 | struct mm_struct *mm; | |
878 | unsigned long addr; | |
879 | }; | |
880 | ||
881 | static void | |
882 | ipi_flush_tlb_page(void *x) | |
883 | { | |
884 | struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x; | |
885 | struct mm_struct * mm = data->mm; | |
886 | ||
887 | if (mm == current->active_mm && !asn_locked()) | |
888 | flush_tlb_current_page(mm, data->vma, data->addr); | |
889 | else | |
890 | flush_tlb_other(mm); | |
891 | } | |
892 | ||
893 | void | |
894 | flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) | |
895 | { | |
896 | struct flush_tlb_page_struct data; | |
897 | struct mm_struct *mm = vma->vm_mm; | |
898 | ||
899 | preempt_disable(); | |
900 | ||
901 | if (mm == current->active_mm) { | |
902 | flush_tlb_current_page(mm, vma, addr); | |
903 | if (atomic_read(&mm->mm_users) <= 1) { | |
904 | int cpu, this_cpu = smp_processor_id(); | |
905 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | |
906 | if (!cpu_online(cpu) || cpu == this_cpu) | |
907 | continue; | |
908 | if (mm->context[cpu]) | |
909 | mm->context[cpu] = 0; | |
910 | } | |
911 | preempt_enable(); | |
912 | return; | |
913 | } | |
914 | } | |
915 | ||
916 | data.vma = vma; | |
917 | data.mm = mm; | |
918 | data.addr = addr; | |
919 | ||
920 | if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) { | |
921 | printk(KERN_CRIT "flush_tlb_page: timed out\n"); | |
922 | } | |
923 | ||
924 | preempt_enable(); | |
925 | } | |
cff52daf | 926 | EXPORT_SYMBOL(flush_tlb_page); |
1da177e4 LT |
927 | |
928 | void | |
929 | flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | |
930 | { | |
931 | /* On the Alpha we always flush the whole user tlb. */ | |
932 | flush_tlb_mm(vma->vm_mm); | |
933 | } | |
cff52daf | 934 | EXPORT_SYMBOL(flush_tlb_range); |
1da177e4 LT |
935 | |
936 | static void | |
937 | ipi_flush_icache_page(void *x) | |
938 | { | |
939 | struct mm_struct *mm = (struct mm_struct *) x; | |
940 | if (mm == current->active_mm && !asn_locked()) | |
941 | __load_new_mm_context(mm); | |
942 | else | |
943 | flush_tlb_other(mm); | |
944 | } | |
945 | ||
946 | void | |
947 | flush_icache_user_range(struct vm_area_struct *vma, struct page *page, | |
948 | unsigned long addr, int len) | |
949 | { | |
950 | struct mm_struct *mm = vma->vm_mm; | |
951 | ||
952 | if ((vma->vm_flags & VM_EXEC) == 0) | |
953 | return; | |
954 | ||
955 | preempt_disable(); | |
956 | ||
957 | if (mm == current->active_mm) { | |
958 | __load_new_mm_context(mm); | |
959 | if (atomic_read(&mm->mm_users) <= 1) { | |
960 | int cpu, this_cpu = smp_processor_id(); | |
961 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | |
962 | if (!cpu_online(cpu) || cpu == this_cpu) | |
963 | continue; | |
964 | if (mm->context[cpu]) | |
965 | mm->context[cpu] = 0; | |
966 | } | |
967 | preempt_enable(); | |
968 | return; | |
969 | } | |
970 | } | |
971 | ||
972 | if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) { | |
973 | printk(KERN_CRIT "flush_icache_page: timed out\n"); | |
974 | } | |
975 | ||
976 | preempt_enable(); | |
977 | } |