Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* smp.c: Sparc64 SMP support. |
2 | * | |
3 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | |
4 | */ | |
5 | ||
6 | #include <linux/module.h> | |
7 | #include <linux/kernel.h> | |
8 | #include <linux/sched.h> | |
9 | #include <linux/mm.h> | |
10 | #include <linux/pagemap.h> | |
11 | #include <linux/threads.h> | |
12 | #include <linux/smp.h> | |
13 | #include <linux/smp_lock.h> | |
14 | #include <linux/interrupt.h> | |
15 | #include <linux/kernel_stat.h> | |
16 | #include <linux/delay.h> | |
17 | #include <linux/init.h> | |
18 | #include <linux/spinlock.h> | |
19 | #include <linux/fs.h> | |
20 | #include <linux/seq_file.h> | |
21 | #include <linux/cache.h> | |
22 | #include <linux/jiffies.h> | |
23 | #include <linux/profile.h> | |
24 | #include <linux/bootmem.h> | |
25 | ||
26 | #include <asm/head.h> | |
27 | #include <asm/ptrace.h> | |
28 | #include <asm/atomic.h> | |
29 | #include <asm/tlbflush.h> | |
30 | #include <asm/mmu_context.h> | |
31 | #include <asm/cpudata.h> | |
32 | ||
33 | #include <asm/irq.h> | |
34 | #include <asm/page.h> | |
35 | #include <asm/pgtable.h> | |
36 | #include <asm/oplib.h> | |
37 | #include <asm/uaccess.h> | |
38 | #include <asm/timer.h> | |
39 | #include <asm/starfire.h> | |
40 | #include <asm/tlb.h> | |
41 | ||
42 | extern int linux_num_cpus; | |
43 | extern void calibrate_delay(void); | |
44 | ||
45 | /* Please don't make this stuff initdata!!! --DaveM */ | |
46 | static unsigned char boot_cpu_id; | |
47 | ||
c12a8289 AM |
48 | cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; |
49 | cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; | |
1da177e4 LT |
50 | static cpumask_t smp_commenced_mask; |
51 | static cpumask_t cpu_callout_map; | |
52 | ||
53 | void smp_info(struct seq_file *m) | |
54 | { | |
55 | int i; | |
56 | ||
57 | seq_printf(m, "State:\n"); | |
58 | for (i = 0; i < NR_CPUS; i++) { | |
59 | if (cpu_online(i)) | |
60 | seq_printf(m, | |
61 | "CPU%d:\t\tonline\n", i); | |
62 | } | |
63 | } | |
64 | ||
65 | void smp_bogo(struct seq_file *m) | |
66 | { | |
67 | int i; | |
68 | ||
69 | for (i = 0; i < NR_CPUS; i++) | |
70 | if (cpu_online(i)) | |
71 | seq_printf(m, | |
72 | "Cpu%dBogo\t: %lu.%02lu\n" | |
73 | "Cpu%dClkTck\t: %016lx\n", | |
74 | i, cpu_data(i).udelay_val / (500000/HZ), | |
75 | (cpu_data(i).udelay_val / (5000/HZ)) % 100, | |
76 | i, cpu_data(i).clock_tick); | |
77 | } | |
78 | ||
79 | void __init smp_store_cpu_info(int id) | |
80 | { | |
81 | int cpu_node; | |
82 | ||
83 | /* multiplier and counter set by | |
84 | smp_setup_percpu_timer() */ | |
85 | cpu_data(id).udelay_val = loops_per_jiffy; | |
86 | ||
87 | cpu_find_by_mid(id, &cpu_node); | |
88 | cpu_data(id).clock_tick = prom_getintdefault(cpu_node, | |
89 | "clock-frequency", 0); | |
90 | ||
91 | cpu_data(id).pgcache_size = 0; | |
92 | cpu_data(id).pte_cache[0] = NULL; | |
93 | cpu_data(id).pte_cache[1] = NULL; | |
94 | cpu_data(id).pgd_cache = NULL; | |
95 | cpu_data(id).idle_volume = 1; | |
96 | } | |
97 | ||
98 | static void smp_setup_percpu_timer(void); | |
99 | ||
100 | static volatile unsigned long callin_flag = 0; | |
101 | ||
102 | extern void inherit_locked_prom_mappings(int save_p); | |
103 | ||
104 | static inline void cpu_setup_percpu_base(unsigned long cpu_id) | |
105 | { | |
106 | __asm__ __volatile__("mov %0, %%g5\n\t" | |
107 | "stxa %0, [%1] %2\n\t" | |
108 | "membar #Sync" | |
109 | : /* no outputs */ | |
110 | : "r" (__per_cpu_offset(cpu_id)), | |
111 | "r" (TSB_REG), "i" (ASI_IMMU)); | |
112 | } | |
113 | ||
114 | void __init smp_callin(void) | |
115 | { | |
116 | int cpuid = hard_smp_processor_id(); | |
117 | ||
118 | inherit_locked_prom_mappings(0); | |
119 | ||
120 | __flush_tlb_all(); | |
121 | ||
122 | cpu_setup_percpu_base(cpuid); | |
123 | ||
124 | smp_setup_percpu_timer(); | |
125 | ||
816242da DM |
126 | if (cheetah_pcache_forced_on) |
127 | cheetah_enable_pcache(); | |
128 | ||
1da177e4 LT |
129 | local_irq_enable(); |
130 | ||
131 | calibrate_delay(); | |
132 | smp_store_cpu_info(cpuid); | |
133 | callin_flag = 1; | |
134 | __asm__ __volatile__("membar #Sync\n\t" | |
135 | "flush %%g6" : : : "memory"); | |
136 | ||
137 | /* Clear this or we will die instantly when we | |
138 | * schedule back to this idler... | |
139 | */ | |
db7d9a4e | 140 | current_thread_info()->new_child = 0; |
1da177e4 LT |
141 | |
142 | /* Attach to the address space of init_task. */ | |
143 | atomic_inc(&init_mm.mm_count); | |
144 | current->active_mm = &init_mm; | |
145 | ||
146 | while (!cpu_isset(cpuid, smp_commenced_mask)) | |
4f07118f | 147 | rmb(); |
1da177e4 LT |
148 | |
149 | cpu_set(cpuid, cpu_online_map); | |
150 | } | |
151 | ||
152 | void cpu_panic(void) | |
153 | { | |
154 | printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id()); | |
155 | panic("SMP bolixed\n"); | |
156 | } | |
157 | ||
d369ddd2 | 158 | static unsigned long current_tick_offset __read_mostly; |
1da177e4 LT |
159 | |
160 | /* This tick register synchronization scheme is taken entirely from | |
161 | * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. | |
162 | * | |
163 | * The only change I've made is to rework it so that the master | |
164 | * initiates the synchronization instead of the slave. -DaveM | |
165 | */ | |
166 | ||
167 | #define MASTER 0 | |
168 | #define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long)) | |
169 | ||
170 | #define NUM_ROUNDS 64 /* magic value */ | |
171 | #define NUM_ITERS 5 /* likewise */ | |
172 | ||
173 | static DEFINE_SPINLOCK(itc_sync_lock); | |
174 | static unsigned long go[SLAVE + 1]; | |
175 | ||
176 | #define DEBUG_TICK_SYNC 0 | |
177 | ||
178 | static inline long get_delta (long *rt, long *master) | |
179 | { | |
180 | unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0; | |
181 | unsigned long tcenter, t0, t1, tm; | |
182 | unsigned long i; | |
183 | ||
184 | for (i = 0; i < NUM_ITERS; i++) { | |
185 | t0 = tick_ops->get_tick(); | |
186 | go[MASTER] = 1; | |
4f07118f | 187 | membar_storeload(); |
1da177e4 | 188 | while (!(tm = go[SLAVE])) |
4f07118f | 189 | rmb(); |
1da177e4 | 190 | go[SLAVE] = 0; |
4f07118f | 191 | wmb(); |
1da177e4 LT |
192 | t1 = tick_ops->get_tick(); |
193 | ||
194 | if (t1 - t0 < best_t1 - best_t0) | |
195 | best_t0 = t0, best_t1 = t1, best_tm = tm; | |
196 | } | |
197 | ||
198 | *rt = best_t1 - best_t0; | |
199 | *master = best_tm - best_t0; | |
200 | ||
201 | /* average best_t0 and best_t1 without overflow: */ | |
202 | tcenter = (best_t0/2 + best_t1/2); | |
203 | if (best_t0 % 2 + best_t1 % 2 == 2) | |
204 | tcenter++; | |
205 | return tcenter - best_tm; | |
206 | } | |
207 | ||
208 | void smp_synchronize_tick_client(void) | |
209 | { | |
210 | long i, delta, adj, adjust_latency = 0, done = 0; | |
211 | unsigned long flags, rt, master_time_stamp, bound; | |
212 | #if DEBUG_TICK_SYNC | |
213 | struct { | |
214 | long rt; /* roundtrip time */ | |
215 | long master; /* master's timestamp */ | |
216 | long diff; /* difference between midpoint and master's timestamp */ | |
217 | long lat; /* estimate of itc adjustment latency */ | |
218 | } t[NUM_ROUNDS]; | |
219 | #endif | |
220 | ||
221 | go[MASTER] = 1; | |
222 | ||
223 | while (go[MASTER]) | |
4f07118f | 224 | rmb(); |
1da177e4 LT |
225 | |
226 | local_irq_save(flags); | |
227 | { | |
228 | for (i = 0; i < NUM_ROUNDS; i++) { | |
229 | delta = get_delta(&rt, &master_time_stamp); | |
230 | if (delta == 0) { | |
231 | done = 1; /* let's lock on to this... */ | |
232 | bound = rt; | |
233 | } | |
234 | ||
235 | if (!done) { | |
236 | if (i > 0) { | |
237 | adjust_latency += -delta; | |
238 | adj = -delta + adjust_latency/4; | |
239 | } else | |
240 | adj = -delta; | |
241 | ||
242 | tick_ops->add_tick(adj, current_tick_offset); | |
243 | } | |
244 | #if DEBUG_TICK_SYNC | |
245 | t[i].rt = rt; | |
246 | t[i].master = master_time_stamp; | |
247 | t[i].diff = delta; | |
248 | t[i].lat = adjust_latency/4; | |
249 | #endif | |
250 | } | |
251 | } | |
252 | local_irq_restore(flags); | |
253 | ||
254 | #if DEBUG_TICK_SYNC | |
255 | for (i = 0; i < NUM_ROUNDS; i++) | |
256 | printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n", | |
257 | t[i].rt, t[i].master, t[i].diff, t[i].lat); | |
258 | #endif | |
259 | ||
260 | printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles," | |
261 | "maxerr %lu cycles)\n", smp_processor_id(), delta, rt); | |
262 | } | |
263 | ||
264 | static void smp_start_sync_tick_client(int cpu); | |
265 | ||
266 | static void smp_synchronize_one_tick(int cpu) | |
267 | { | |
268 | unsigned long flags, i; | |
269 | ||
270 | go[MASTER] = 0; | |
271 | ||
272 | smp_start_sync_tick_client(cpu); | |
273 | ||
274 | /* wait for client to be ready */ | |
275 | while (!go[MASTER]) | |
4f07118f | 276 | rmb(); |
1da177e4 LT |
277 | |
278 | /* now let the client proceed into his loop */ | |
279 | go[MASTER] = 0; | |
4f07118f | 280 | membar_storeload(); |
1da177e4 LT |
281 | |
282 | spin_lock_irqsave(&itc_sync_lock, flags); | |
283 | { | |
284 | for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) { | |
285 | while (!go[MASTER]) | |
4f07118f | 286 | rmb(); |
1da177e4 | 287 | go[MASTER] = 0; |
4f07118f | 288 | wmb(); |
1da177e4 | 289 | go[SLAVE] = tick_ops->get_tick(); |
4f07118f | 290 | membar_storeload(); |
1da177e4 LT |
291 | } |
292 | } | |
293 | spin_unlock_irqrestore(&itc_sync_lock, flags); | |
294 | } | |
295 | ||
296 | extern unsigned long sparc64_cpu_startup; | |
297 | ||
298 | /* The OBP cpu startup callback truncates the 3rd arg cookie to | |
299 | * 32-bits (I think) so to be safe we have it read the pointer | |
300 | * contained here so we work on >4GB machines. -DaveM | |
301 | */ | |
302 | static struct thread_info *cpu_new_thread = NULL; | |
303 | ||
304 | static int __devinit smp_boot_one_cpu(unsigned int cpu) | |
305 | { | |
306 | unsigned long entry = | |
307 | (unsigned long)(&sparc64_cpu_startup); | |
308 | unsigned long cookie = | |
309 | (unsigned long)(&cpu_new_thread); | |
310 | struct task_struct *p; | |
311 | int timeout, ret, cpu_node; | |
312 | ||
313 | p = fork_idle(cpu); | |
314 | callin_flag = 0; | |
315 | cpu_new_thread = p->thread_info; | |
316 | cpu_set(cpu, cpu_callout_map); | |
317 | ||
318 | cpu_find_by_mid(cpu, &cpu_node); | |
319 | prom_startcpu(cpu_node, entry, cookie); | |
320 | ||
321 | for (timeout = 0; timeout < 5000000; timeout++) { | |
322 | if (callin_flag) | |
323 | break; | |
324 | udelay(100); | |
325 | } | |
326 | if (callin_flag) { | |
327 | ret = 0; | |
328 | } else { | |
329 | printk("Processor %d is stuck.\n", cpu); | |
330 | cpu_clear(cpu, cpu_callout_map); | |
331 | ret = -ENODEV; | |
332 | } | |
333 | cpu_new_thread = NULL; | |
334 | ||
335 | return ret; | |
336 | } | |
337 | ||
338 | static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu) | |
339 | { | |
340 | u64 result, target; | |
341 | int stuck, tmp; | |
342 | ||
343 | if (this_is_starfire) { | |
344 | /* map to real UPA ID */ | |
345 | cpu = (((cpu & 0x3c) << 1) | | |
346 | ((cpu & 0x40) >> 4) | | |
347 | (cpu & 0x3)); | |
348 | } | |
349 | ||
350 | target = (cpu << 14) | 0x70; | |
351 | again: | |
352 | /* Ok, this is the real Spitfire Errata #54. | |
353 | * One must read back from a UDB internal register | |
354 | * after writes to the UDB interrupt dispatch, but | |
355 | * before the membar Sync for that write. | |
356 | * So we use the high UDB control register (ASI 0x7f, | |
357 | * ADDR 0x20) for the dummy read. -DaveM | |
358 | */ | |
359 | tmp = 0x40; | |
360 | __asm__ __volatile__( | |
361 | "wrpr %1, %2, %%pstate\n\t" | |
362 | "stxa %4, [%0] %3\n\t" | |
363 | "stxa %5, [%0+%8] %3\n\t" | |
364 | "add %0, %8, %0\n\t" | |
365 | "stxa %6, [%0+%8] %3\n\t" | |
366 | "membar #Sync\n\t" | |
367 | "stxa %%g0, [%7] %3\n\t" | |
368 | "membar #Sync\n\t" | |
369 | "mov 0x20, %%g1\n\t" | |
370 | "ldxa [%%g1] 0x7f, %%g0\n\t" | |
371 | "membar #Sync" | |
372 | : "=r" (tmp) | |
373 | : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W), | |
374 | "r" (data0), "r" (data1), "r" (data2), "r" (target), | |
375 | "r" (0x10), "0" (tmp) | |
376 | : "g1"); | |
377 | ||
378 | /* NOTE: PSTATE_IE is still clear. */ | |
379 | stuck = 100000; | |
380 | do { | |
381 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | |
382 | : "=r" (result) | |
383 | : "i" (ASI_INTR_DISPATCH_STAT)); | |
384 | if (result == 0) { | |
385 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | |
386 | : : "r" (pstate)); | |
387 | return; | |
388 | } | |
389 | stuck -= 1; | |
390 | if (stuck == 0) | |
391 | break; | |
392 | } while (result & 0x1); | |
393 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | |
394 | : : "r" (pstate)); | |
395 | if (stuck == 0) { | |
396 | printk("CPU[%d]: mondo stuckage result[%016lx]\n", | |
397 | smp_processor_id(), result); | |
398 | } else { | |
399 | udelay(2); | |
400 | goto again; | |
401 | } | |
402 | } | |
403 | ||
404 | static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) | |
405 | { | |
406 | u64 pstate; | |
407 | int i; | |
408 | ||
409 | __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); | |
410 | for_each_cpu_mask(i, mask) | |
411 | spitfire_xcall_helper(data0, data1, data2, pstate, i); | |
412 | } | |
413 | ||
414 | /* Cheetah now allows to send the whole 64-bytes of data in the interrupt | |
415 | * packet, but we have no use for that. However we do take advantage of | |
416 | * the new pipelining feature (ie. dispatch to multiple cpus simultaneously). | |
417 | */ | |
418 | static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) | |
419 | { | |
420 | u64 pstate, ver; | |
421 | int nack_busy_id, is_jalapeno; | |
422 | ||
423 | if (cpus_empty(mask)) | |
424 | return; | |
425 | ||
426 | /* Unfortunately, someone at Sun had the brilliant idea to make the | |
427 | * busy/nack fields hard-coded by ITID number for this Ultra-III | |
428 | * derivative processor. | |
429 | */ | |
430 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | |
431 | is_jalapeno = ((ver >> 32) == 0x003e0016); | |
432 | ||
433 | __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); | |
434 | ||
435 | retry: | |
436 | __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t" | |
437 | : : "r" (pstate), "i" (PSTATE_IE)); | |
438 | ||
439 | /* Setup the dispatch data registers. */ | |
440 | __asm__ __volatile__("stxa %0, [%3] %6\n\t" | |
441 | "stxa %1, [%4] %6\n\t" | |
442 | "stxa %2, [%5] %6\n\t" | |
443 | "membar #Sync\n\t" | |
444 | : /* no outputs */ | |
445 | : "r" (data0), "r" (data1), "r" (data2), | |
446 | "r" (0x40), "r" (0x50), "r" (0x60), | |
447 | "i" (ASI_INTR_W)); | |
448 | ||
449 | nack_busy_id = 0; | |
450 | { | |
451 | int i; | |
452 | ||
453 | for_each_cpu_mask(i, mask) { | |
454 | u64 target = (i << 14) | 0x70; | |
455 | ||
456 | if (!is_jalapeno) | |
457 | target |= (nack_busy_id << 24); | |
458 | __asm__ __volatile__( | |
459 | "stxa %%g0, [%0] %1\n\t" | |
460 | "membar #Sync\n\t" | |
461 | : /* no outputs */ | |
462 | : "r" (target), "i" (ASI_INTR_W)); | |
463 | nack_busy_id++; | |
464 | } | |
465 | } | |
466 | ||
467 | /* Now, poll for completion. */ | |
468 | { | |
469 | u64 dispatch_stat; | |
470 | long stuck; | |
471 | ||
472 | stuck = 100000 * nack_busy_id; | |
473 | do { | |
474 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | |
475 | : "=r" (dispatch_stat) | |
476 | : "i" (ASI_INTR_DISPATCH_STAT)); | |
477 | if (dispatch_stat == 0UL) { | |
478 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | |
479 | : : "r" (pstate)); | |
480 | return; | |
481 | } | |
482 | if (!--stuck) | |
483 | break; | |
484 | } while (dispatch_stat & 0x5555555555555555UL); | |
485 | ||
486 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | |
487 | : : "r" (pstate)); | |
488 | ||
489 | if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) { | |
490 | /* Busy bits will not clear, continue instead | |
491 | * of freezing up on this cpu. | |
492 | */ | |
493 | printk("CPU[%d]: mondo stuckage result[%016lx]\n", | |
494 | smp_processor_id(), dispatch_stat); | |
495 | } else { | |
496 | int i, this_busy_nack = 0; | |
497 | ||
498 | /* Delay some random time with interrupts enabled | |
499 | * to prevent deadlock. | |
500 | */ | |
501 | udelay(2 * nack_busy_id); | |
502 | ||
503 | /* Clear out the mask bits for cpus which did not | |
504 | * NACK us. | |
505 | */ | |
506 | for_each_cpu_mask(i, mask) { | |
507 | u64 check_mask; | |
508 | ||
509 | if (is_jalapeno) | |
510 | check_mask = (0x2UL << (2*i)); | |
511 | else | |
512 | check_mask = (0x2UL << | |
513 | this_busy_nack); | |
514 | if ((dispatch_stat & check_mask) == 0) | |
515 | cpu_clear(i, mask); | |
516 | this_busy_nack += 2; | |
517 | } | |
518 | ||
519 | goto retry; | |
520 | } | |
521 | } | |
522 | } | |
523 | ||
524 | /* Send cross call to all processors mentioned in MASK | |
525 | * except self. | |
526 | */ | |
527 | static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask) | |
528 | { | |
529 | u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff)); | |
530 | int this_cpu = get_cpu(); | |
531 | ||
532 | cpus_and(mask, mask, cpu_online_map); | |
533 | cpu_clear(this_cpu, mask); | |
534 | ||
535 | if (tlb_type == spitfire) | |
536 | spitfire_xcall_deliver(data0, data1, data2, mask); | |
537 | else | |
538 | cheetah_xcall_deliver(data0, data1, data2, mask); | |
539 | /* NOTE: Caller runs local copy on master. */ | |
540 | ||
541 | put_cpu(); | |
542 | } | |
543 | ||
544 | extern unsigned long xcall_sync_tick; | |
545 | ||
546 | static void smp_start_sync_tick_client(int cpu) | |
547 | { | |
548 | cpumask_t mask = cpumask_of_cpu(cpu); | |
549 | ||
550 | smp_cross_call_masked(&xcall_sync_tick, | |
551 | 0, 0, 0, mask); | |
552 | } | |
553 | ||
554 | /* Send cross call to all processors except self. */ | |
555 | #define smp_cross_call(func, ctx, data1, data2) \ | |
556 | smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map) | |
557 | ||
558 | struct call_data_struct { | |
559 | void (*func) (void *info); | |
560 | void *info; | |
561 | atomic_t finished; | |
562 | int wait; | |
563 | }; | |
564 | ||
565 | static DEFINE_SPINLOCK(call_lock); | |
566 | static struct call_data_struct *call_data; | |
567 | ||
568 | extern unsigned long xcall_call_function; | |
569 | ||
570 | /* | |
571 | * You must not call this function with disabled interrupts or from a | |
572 | * hardware interrupt handler or from a bottom half handler. | |
573 | */ | |
574 | int smp_call_function(void (*func)(void *info), void *info, | |
575 | int nonatomic, int wait) | |
576 | { | |
577 | struct call_data_struct data; | |
578 | int cpus = num_online_cpus() - 1; | |
579 | long timeout; | |
580 | ||
581 | if (!cpus) | |
582 | return 0; | |
583 | ||
584 | /* Can deadlock when called with interrupts disabled */ | |
585 | WARN_ON(irqs_disabled()); | |
586 | ||
587 | data.func = func; | |
588 | data.info = info; | |
589 | atomic_set(&data.finished, 0); | |
590 | data.wait = wait; | |
591 | ||
592 | spin_lock(&call_lock); | |
593 | ||
594 | call_data = &data; | |
595 | ||
596 | smp_cross_call(&xcall_call_function, 0, 0, 0); | |
597 | ||
598 | /* | |
599 | * Wait for other cpus to complete function or at | |
600 | * least snap the call data. | |
601 | */ | |
602 | timeout = 1000000; | |
603 | while (atomic_read(&data.finished) != cpus) { | |
604 | if (--timeout <= 0) | |
605 | goto out_timeout; | |
606 | barrier(); | |
607 | udelay(1); | |
608 | } | |
609 | ||
610 | spin_unlock(&call_lock); | |
611 | ||
612 | return 0; | |
613 | ||
614 | out_timeout: | |
615 | spin_unlock(&call_lock); | |
616 | printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n", | |
617 | (long) num_online_cpus() - 1L, | |
618 | (long) atomic_read(&data.finished)); | |
619 | return 0; | |
620 | } | |
621 | ||
622 | void smp_call_function_client(int irq, struct pt_regs *regs) | |
623 | { | |
624 | void (*func) (void *info) = call_data->func; | |
625 | void *info = call_data->info; | |
626 | ||
627 | clear_softint(1 << irq); | |
628 | if (call_data->wait) { | |
629 | /* let initiator proceed only after completion */ | |
630 | func(info); | |
631 | atomic_inc(&call_data->finished); | |
632 | } else { | |
633 | /* let initiator proceed after getting data */ | |
634 | atomic_inc(&call_data->finished); | |
635 | func(info); | |
636 | } | |
637 | } | |
638 | ||
639 | extern unsigned long xcall_flush_tlb_mm; | |
640 | extern unsigned long xcall_flush_tlb_pending; | |
641 | extern unsigned long xcall_flush_tlb_kernel_range; | |
642 | extern unsigned long xcall_flush_tlb_all_spitfire; | |
643 | extern unsigned long xcall_flush_tlb_all_cheetah; | |
644 | extern unsigned long xcall_report_regs; | |
645 | extern unsigned long xcall_receive_signal; | |
646 | ||
647 | #ifdef DCACHE_ALIASING_POSSIBLE | |
648 | extern unsigned long xcall_flush_dcache_page_cheetah; | |
649 | #endif | |
650 | extern unsigned long xcall_flush_dcache_page_spitfire; | |
651 | ||
652 | #ifdef CONFIG_DEBUG_DCFLUSH | |
653 | extern atomic_t dcpage_flushes; | |
654 | extern atomic_t dcpage_flushes_xcall; | |
655 | #endif | |
656 | ||
657 | static __inline__ void __local_flush_dcache_page(struct page *page) | |
658 | { | |
659 | #ifdef DCACHE_ALIASING_POSSIBLE | |
660 | __flush_dcache_page(page_address(page), | |
661 | ((tlb_type == spitfire) && | |
662 | page_mapping(page) != NULL)); | |
663 | #else | |
664 | if (page_mapping(page) != NULL && | |
665 | tlb_type == spitfire) | |
666 | __flush_icache_page(__pa(page_address(page))); | |
667 | #endif | |
668 | } | |
669 | ||
670 | void smp_flush_dcache_page_impl(struct page *page, int cpu) | |
671 | { | |
672 | cpumask_t mask = cpumask_of_cpu(cpu); | |
673 | int this_cpu = get_cpu(); | |
674 | ||
675 | #ifdef CONFIG_DEBUG_DCFLUSH | |
676 | atomic_inc(&dcpage_flushes); | |
677 | #endif | |
678 | if (cpu == this_cpu) { | |
679 | __local_flush_dcache_page(page); | |
680 | } else if (cpu_online(cpu)) { | |
681 | void *pg_addr = page_address(page); | |
682 | u64 data0; | |
683 | ||
684 | if (tlb_type == spitfire) { | |
685 | data0 = | |
686 | ((u64)&xcall_flush_dcache_page_spitfire); | |
687 | if (page_mapping(page) != NULL) | |
688 | data0 |= ((u64)1 << 32); | |
689 | spitfire_xcall_deliver(data0, | |
690 | __pa(pg_addr), | |
691 | (u64) pg_addr, | |
692 | mask); | |
693 | } else { | |
694 | #ifdef DCACHE_ALIASING_POSSIBLE | |
695 | data0 = | |
696 | ((u64)&xcall_flush_dcache_page_cheetah); | |
697 | cheetah_xcall_deliver(data0, | |
698 | __pa(pg_addr), | |
699 | 0, mask); | |
700 | #endif | |
701 | } | |
702 | #ifdef CONFIG_DEBUG_DCFLUSH | |
703 | atomic_inc(&dcpage_flushes_xcall); | |
704 | #endif | |
705 | } | |
706 | ||
707 | put_cpu(); | |
708 | } | |
709 | ||
710 | void flush_dcache_page_all(struct mm_struct *mm, struct page *page) | |
711 | { | |
712 | void *pg_addr = page_address(page); | |
713 | cpumask_t mask = cpu_online_map; | |
714 | u64 data0; | |
715 | int this_cpu = get_cpu(); | |
716 | ||
717 | cpu_clear(this_cpu, mask); | |
718 | ||
719 | #ifdef CONFIG_DEBUG_DCFLUSH | |
720 | atomic_inc(&dcpage_flushes); | |
721 | #endif | |
722 | if (cpus_empty(mask)) | |
723 | goto flush_self; | |
724 | if (tlb_type == spitfire) { | |
725 | data0 = ((u64)&xcall_flush_dcache_page_spitfire); | |
726 | if (page_mapping(page) != NULL) | |
727 | data0 |= ((u64)1 << 32); | |
728 | spitfire_xcall_deliver(data0, | |
729 | __pa(pg_addr), | |
730 | (u64) pg_addr, | |
731 | mask); | |
732 | } else { | |
733 | #ifdef DCACHE_ALIASING_POSSIBLE | |
734 | data0 = ((u64)&xcall_flush_dcache_page_cheetah); | |
735 | cheetah_xcall_deliver(data0, | |
736 | __pa(pg_addr), | |
737 | 0, mask); | |
738 | #endif | |
739 | } | |
740 | #ifdef CONFIG_DEBUG_DCFLUSH | |
741 | atomic_inc(&dcpage_flushes_xcall); | |
742 | #endif | |
743 | flush_self: | |
744 | __local_flush_dcache_page(page); | |
745 | ||
746 | put_cpu(); | |
747 | } | |
748 | ||
749 | void smp_receive_signal(int cpu) | |
750 | { | |
751 | cpumask_t mask = cpumask_of_cpu(cpu); | |
752 | ||
753 | if (cpu_online(cpu)) { | |
754 | u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff); | |
755 | ||
756 | if (tlb_type == spitfire) | |
757 | spitfire_xcall_deliver(data0, 0, 0, mask); | |
758 | else | |
759 | cheetah_xcall_deliver(data0, 0, 0, mask); | |
760 | } | |
761 | } | |
762 | ||
763 | void smp_receive_signal_client(int irq, struct pt_regs *regs) | |
764 | { | |
765 | /* Just return, rtrap takes care of the rest. */ | |
766 | clear_softint(1 << irq); | |
767 | } | |
768 | ||
769 | void smp_report_regs(void) | |
770 | { | |
771 | smp_cross_call(&xcall_report_regs, 0, 0, 0); | |
772 | } | |
773 | ||
774 | void smp_flush_tlb_all(void) | |
775 | { | |
776 | if (tlb_type == spitfire) | |
777 | smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0); | |
778 | else | |
779 | smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0); | |
780 | __flush_tlb_all(); | |
781 | } | |
782 | ||
783 | /* We know that the window frames of the user have been flushed | |
784 | * to the stack before we get here because all of our callers | |
785 | * are flush_tlb_*() routines, and these run after flush_cache_*() | |
786 | * which performs the flushw. | |
787 | * | |
788 | * The SMP TLB coherency scheme we use works as follows: | |
789 | * | |
790 | * 1) mm->cpu_vm_mask is a bit mask of which cpus an address | |
791 | * space has (potentially) executed on, this is the heuristic | |
792 | * we use to avoid doing cross calls. | |
793 | * | |
794 | * Also, for flushing from kswapd and also for clones, we | |
795 | * use cpu_vm_mask as the list of cpus to make run the TLB. | |
796 | * | |
797 | * 2) TLB context numbers are shared globally across all processors | |
798 | * in the system, this allows us to play several games to avoid | |
799 | * cross calls. | |
800 | * | |
801 | * One invariant is that when a cpu switches to a process, and | |
802 | * that process's tsk->active_mm->cpu_vm_mask does not have the | |
803 | * current cpu's bit set, that tlb context is flushed locally. | |
804 | * | |
805 | * If the address space is non-shared (ie. mm->count == 1) we avoid | |
806 | * cross calls when we want to flush the currently running process's | |
807 | * tlb state. This is done by clearing all cpu bits except the current | |
808 | * processor's in current->active_mm->cpu_vm_mask and performing the | |
809 | * flush locally only. This will force any subsequent cpus which run | |
810 | * this task to flush the context from the local tlb if the process | |
811 | * migrates to another cpu (again). | |
812 | * | |
813 | * 3) For shared address spaces (threads) and swapping we bite the | |
814 | * bullet for most cases and perform the cross call (but only to | |
815 | * the cpus listed in cpu_vm_mask). | |
816 | * | |
817 | * The performance gain from "optimizing" away the cross call for threads is | |
818 | * questionable (in theory the big win for threads is the massive sharing of | |
819 | * address space state across processors). | |
820 | */ | |
821 | void smp_flush_tlb_mm(struct mm_struct *mm) | |
822 | { | |
823 | /* | |
824 | * This code is called from two places, dup_mmap and exit_mmap. In the | |
824 | * former case, we really need a flush. In the latter case, the callers | |
826 | * are single threaded exec_mmap (really need a flush), multithreaded | |
827 | * exec_mmap case (do not need to flush, since the caller gets a new | |
828 | * context via activate_mm), and all other callers of mmput() whence | |
829 | * the flush can be optimized since the associated threads are dead and | |
830 | * the mm is being torn down (__exit_mm and other mmput callers) or the | |
831 | * owning thread is dissociating itself from the mm. The | |
832 | * (atomic_read(&mm->mm_users) == 0) check ensures real work is done | |
833 | * for single thread exec and dup_mmap cases. An alternate check might | |
834 | * have been (current->mm != mm). | |
835 | * Kanoj Sarcar | |
836 | */ | |
837 | if (atomic_read(&mm->mm_users) == 0) | |
838 | return; | |
839 | ||
840 | { | |
841 | u32 ctx = CTX_HWBITS(mm->context); | |
842 | int cpu = get_cpu(); | |
843 | ||
844 | if (atomic_read(&mm->mm_users) == 1) { | |
845 | mm->cpu_vm_mask = cpumask_of_cpu(cpu); | |
846 | goto local_flush_and_out; | |
847 | } | |
848 | ||
849 | smp_cross_call_masked(&xcall_flush_tlb_mm, | |
850 | ctx, 0, 0, | |
851 | mm->cpu_vm_mask); | |
852 | ||
853 | local_flush_and_out: | |
854 | __flush_tlb_mm(ctx, SECONDARY_CONTEXT); | |
855 | ||
856 | put_cpu(); | |
857 | } | |
858 | } | |
859 | ||
860 | void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) | |
861 | { | |
862 | u32 ctx = CTX_HWBITS(mm->context); | |
863 | int cpu = get_cpu(); | |
864 | ||
865 | if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) { | |
866 | mm->cpu_vm_mask = cpumask_of_cpu(cpu); | |
867 | goto local_flush_and_out; | |
868 | } else { | |
869 | /* This optimization is not valid. Normally | |
870 | * we will be holding the page_table_lock, but | |
871 | * there is an exception which is copy_page_range() | |
872 | * when forking. The lock is held during the individual | |
873 | * page table updates in the parent, but not at the | |
874 | * top level, which is where we are invoked. | |
875 | */ | |
876 | if (0) { | |
877 | cpumask_t this_cpu_mask = cpumask_of_cpu(cpu); | |
878 | ||
879 | /* By virtue of running under the mm->page_table_lock, | |
880 | * and mmu_context.h:switch_mm doing the same, the | |
881 | * following operation is safe. | |
882 | */ | |
883 | if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask)) | |
884 | goto local_flush_and_out; | |
885 | } | |
886 | } | |
887 | ||
888 | smp_cross_call_masked(&xcall_flush_tlb_pending, | |
889 | ctx, nr, (unsigned long) vaddrs, | |
890 | mm->cpu_vm_mask); | |
891 | ||
892 | local_flush_and_out: | |
893 | __flush_tlb_pending(ctx, nr, vaddrs); | |
894 | ||
895 | put_cpu(); | |
896 | } | |
897 | ||
898 | void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |
899 | { | |
900 | start &= PAGE_MASK; | |
901 | end = PAGE_ALIGN(end); | |
902 | if (start != end) { | |
903 | smp_cross_call(&xcall_flush_tlb_kernel_range, | |
904 | 0, start, end); | |
905 | ||
906 | __flush_tlb_kernel_range(start, end); | |
907 | } | |
908 | } | |
909 | ||
910 | /* CPU capture. */ | |
911 | /* #define CAPTURE_DEBUG */ | |
912 | extern unsigned long xcall_capture; | |
913 | ||
914 | static atomic_t smp_capture_depth = ATOMIC_INIT(0); | |
915 | static atomic_t smp_capture_registry = ATOMIC_INIT(0); | |
916 | static unsigned long penguins_are_doing_time; | |
917 | ||
918 | void smp_capture(void) | |
919 | { | |
920 | int result = atomic_add_ret(1, &smp_capture_depth); | |
921 | ||
922 | if (result == 1) { | |
923 | int ncpus = num_online_cpus(); | |
924 | ||
925 | #ifdef CAPTURE_DEBUG | |
926 | printk("CPU[%d]: Sending penguins to jail...", | |
927 | smp_processor_id()); | |
928 | #endif | |
929 | penguins_are_doing_time = 1; | |
4f07118f | 930 | membar_storestore_loadstore(); |
1da177e4 LT |
931 | atomic_inc(&smp_capture_registry); |
932 | smp_cross_call(&xcall_capture, 0, 0, 0); | |
933 | while (atomic_read(&smp_capture_registry) != ncpus) | |
4f07118f | 934 | rmb(); |
1da177e4 LT |
935 | #ifdef CAPTURE_DEBUG |
936 | printk("done\n"); | |
937 | #endif | |
938 | } | |
939 | } | |
940 | ||
941 | void smp_release(void) | |
942 | { | |
943 | if (atomic_dec_and_test(&smp_capture_depth)) { | |
944 | #ifdef CAPTURE_DEBUG | |
945 | printk("CPU[%d]: Giving pardon to " | |
946 | "imprisoned penguins\n", | |
947 | smp_processor_id()); | |
948 | #endif | |
949 | penguins_are_doing_time = 0; | |
4f07118f | 950 | membar_storeload_storestore(); |
1da177e4 LT |
951 | atomic_dec(&smp_capture_registry); |
952 | } | |
953 | } | |
954 | ||
955 | /* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they | |
956 | * can service tlb flush xcalls... | |
957 | */ | |
958 | extern void prom_world(int); | |
959 | extern void save_alternate_globals(unsigned long *); | |
960 | extern void restore_alternate_globals(unsigned long *); | |
961 | void smp_penguin_jailcell(int irq, struct pt_regs *regs) | |
962 | { | |
963 | unsigned long global_save[24]; | |
964 | ||
965 | clear_softint(1 << irq); | |
966 | ||
967 | preempt_disable(); | |
968 | ||
969 | __asm__ __volatile__("flushw"); | |
970 | save_alternate_globals(global_save); | |
971 | prom_world(1); | |
972 | atomic_inc(&smp_capture_registry); | |
4f07118f | 973 | membar_storeload_storestore(); |
1da177e4 | 974 | while (penguins_are_doing_time) |
4f07118f | 975 | rmb(); |
1da177e4 LT |
976 | restore_alternate_globals(global_save); |
977 | atomic_dec(&smp_capture_registry); | |
978 | prom_world(0); | |
979 | ||
980 | preempt_enable(); | |
981 | } | |
982 | ||
983 | extern unsigned long xcall_promstop; | |
984 | ||
985 | void smp_promstop_others(void) | |
986 | { | |
987 | smp_cross_call(&xcall_promstop, 0, 0, 0); | |
988 | } | |
989 | ||
990 | #define prof_multiplier(__cpu) cpu_data(__cpu).multiplier | |
991 | #define prof_counter(__cpu) cpu_data(__cpu).counter | |
992 | ||
993 | void smp_percpu_timer_interrupt(struct pt_regs *regs) | |
994 | { | |
995 | unsigned long compare, tick, pstate; | |
996 | int cpu = smp_processor_id(); | |
997 | int user = user_mode(regs); | |
998 | ||
999 | /* | |
1000 | * Check for level 14 softint. | |
1001 | */ | |
1002 | { | |
1003 | unsigned long tick_mask = tick_ops->softint_mask; | |
1004 | ||
1005 | if (!(get_softint() & tick_mask)) { | |
1006 | extern void handler_irq(int, struct pt_regs *); | |
1007 | ||
1008 | handler_irq(14, regs); | |
1009 | return; | |
1010 | } | |
1011 | clear_softint(tick_mask); | |
1012 | } | |
1013 | ||
1014 | do { | |
1015 | profile_tick(CPU_PROFILING, regs); | |
1016 | if (!--prof_counter(cpu)) { | |
1017 | irq_enter(); | |
1018 | ||
1019 | if (cpu == boot_cpu_id) { | |
1020 | kstat_this_cpu.irqs[0]++; | |
1021 | timer_tick_interrupt(regs); | |
1022 | } | |
1023 | ||
1024 | update_process_times(user); | |
1025 | ||
1026 | irq_exit(); | |
1027 | ||
1028 | prof_counter(cpu) = prof_multiplier(cpu); | |
1029 | } | |
1030 | ||
1031 | /* Guarantee that the following sequences execute | |
1032 | * uninterrupted. | |
1033 | */ | |
1034 | __asm__ __volatile__("rdpr %%pstate, %0\n\t" | |
1035 | "wrpr %0, %1, %%pstate" | |
1036 | : "=r" (pstate) | |
1037 | : "i" (PSTATE_IE)); | |
1038 | ||
1039 | compare = tick_ops->add_compare(current_tick_offset); | |
1040 | tick = tick_ops->get_tick(); | |
1041 | ||
1042 | /* Restore PSTATE_IE. */ | |
1043 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | |
1044 | : /* no outputs */ | |
1045 | : "r" (pstate)); | |
1046 | } while (time_after_eq(tick, compare)); | |
1047 | } | |
1048 | ||
1049 | static void __init smp_setup_percpu_timer(void) | |
1050 | { | |
1051 | int cpu = smp_processor_id(); | |
1052 | unsigned long pstate; | |
1053 | ||
1054 | prof_counter(cpu) = prof_multiplier(cpu) = 1; | |
1055 | ||
1056 | /* Guarantee that the following sequences execute | |
1057 | * uninterrupted. | |
1058 | */ | |
1059 | __asm__ __volatile__("rdpr %%pstate, %0\n\t" | |
1060 | "wrpr %0, %1, %%pstate" | |
1061 | : "=r" (pstate) | |
1062 | : "i" (PSTATE_IE)); | |
1063 | ||
1064 | tick_ops->init_tick(current_tick_offset); | |
1065 | ||
1066 | /* Restore PSTATE_IE. */ | |
1067 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | |
1068 | : /* no outputs */ | |
1069 | : "r" (pstate)); | |
1070 | } | |
1071 | ||
1072 | void __init smp_tick_init(void) | |
1073 | { | |
1074 | boot_cpu_id = hard_smp_processor_id(); | |
1075 | current_tick_offset = timer_tick_offset; | |
1076 | ||
1077 | cpu_set(boot_cpu_id, cpu_online_map); | |
1078 | prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1; | |
1079 | } | |
1080 | ||
1081 | /* /proc/profile writes can call this, don't __init it please. */ | |
1082 | static DEFINE_SPINLOCK(prof_setup_lock); | |
1083 | ||
1084 | int setup_profiling_timer(unsigned int multiplier) | |
1085 | { | |
1086 | unsigned long flags; | |
1087 | int i; | |
1088 | ||
1089 | if ((!multiplier) || (timer_tick_offset / multiplier) < 1000) | |
1090 | return -EINVAL; | |
1091 | ||
1092 | spin_lock_irqsave(&prof_setup_lock, flags); | |
1093 | for (i = 0; i < NR_CPUS; i++) | |
1094 | prof_multiplier(i) = multiplier; | |
1095 | current_tick_offset = (timer_tick_offset / multiplier); | |
1096 | spin_unlock_irqrestore(&prof_setup_lock, flags); | |
1097 | ||
1098 | return 0; | |
1099 | } | |
1100 | ||
1101 | void __init smp_prepare_cpus(unsigned int max_cpus) | |
1102 | { | |
1103 | int instance, mid; | |
1104 | ||
1105 | instance = 0; | |
1106 | while (!cpu_find_by_instance(instance, NULL, &mid)) { | |
1107 | if (mid < max_cpus) | |
1108 | cpu_set(mid, phys_cpu_present_map); | |
1109 | instance++; | |
1110 | } | |
1111 | ||
1112 | if (num_possible_cpus() > max_cpus) { | |
1113 | instance = 0; | |
1114 | while (!cpu_find_by_instance(instance, NULL, &mid)) { | |
1115 | if (mid != boot_cpu_id) { | |
1116 | cpu_clear(mid, phys_cpu_present_map); | |
1117 | if (num_possible_cpus() <= max_cpus) | |
1118 | break; | |
1119 | } | |
1120 | instance++; | |
1121 | } | |
1122 | } | |
1123 | ||
1124 | smp_store_cpu_info(boot_cpu_id); | |
1125 | } | |
1126 | ||
1127 | void __devinit smp_prepare_boot_cpu(void) | |
1128 | { | |
1129 | if (hard_smp_processor_id() >= NR_CPUS) { | |
1130 | prom_printf("Serious problem, boot cpu id >= NR_CPUS\n"); | |
1131 | prom_halt(); | |
1132 | } | |
1133 | ||
1134 | current_thread_info()->cpu = hard_smp_processor_id(); | |
1135 | ||
1136 | cpu_set(smp_processor_id(), cpu_online_map); | |
1137 | cpu_set(smp_processor_id(), phys_cpu_present_map); | |
1138 | } | |
1139 | ||
1140 | int __devinit __cpu_up(unsigned int cpu) | |
1141 | { | |
1142 | int ret = smp_boot_one_cpu(cpu); | |
1143 | ||
1144 | if (!ret) { | |
1145 | cpu_set(cpu, smp_commenced_mask); | |
1146 | while (!cpu_isset(cpu, cpu_online_map)) | |
1147 | mb(); | |
1148 | if (!cpu_isset(cpu, cpu_online_map)) { | |
1149 | ret = -ENODEV; | |
1150 | } else { | |
1151 | smp_synchronize_one_tick(cpu); | |
1152 | } | |
1153 | } | |
1154 | return ret; | |
1155 | } | |
1156 | ||
1157 | void __init smp_cpus_done(unsigned int max_cpus) | |
1158 | { | |
1159 | unsigned long bogosum = 0; | |
1160 | int i; | |
1161 | ||
1162 | for (i = 0; i < NR_CPUS; i++) { | |
1163 | if (cpu_online(i)) | |
1164 | bogosum += cpu_data(i).udelay_val; | |
1165 | } | |
1166 | printk("Total of %ld processors activated " | |
1167 | "(%lu.%02lu BogoMIPS).\n", | |
1168 | (long) num_online_cpus(), | |
1169 | bogosum/(500000/HZ), | |
1170 | (bogosum/(5000/HZ))%100); | |
1171 | } | |
1172 | ||
1173 | /* This needn't do anything as we do not sleep the cpu | |
1174 | * inside of the idler task, so an interrupt is not needed | |
1175 | * to get a clean fast response. | |
1176 | * | |
1177 | * XXX Reverify this assumption... -DaveM | |
1178 | * | |
1179 | * Addendum: We do want it to do something for the signal | |
1180 | * delivery case; we detect that by just seeing | |
1181 | * if we are trying to send this to an idler or not. | |
1182 | */ | |
1183 | void smp_send_reschedule(int cpu) | |
1184 | { | |
1185 | if (cpu_data(cpu).idle_volume == 0) | |
1186 | smp_receive_signal(cpu); | |
1187 | } | |
1188 | ||
1189 | /* This is a nop because we capture all other cpus | |
1190 | * anyways when making the PROM active. | |
1191 | */ | |
1192 | void smp_send_stop(void) | |
1193 | { | |
1194 | } | |
1195 | ||
d369ddd2 DM |
1196 | unsigned long __per_cpu_base __read_mostly; |
1197 | unsigned long __per_cpu_shift __read_mostly; | |
1da177e4 LT |
1198 | |
1199 | EXPORT_SYMBOL(__per_cpu_base); | |
1200 | EXPORT_SYMBOL(__per_cpu_shift); | |
1201 | ||
1202 | void __init setup_per_cpu_areas(void) | |
1203 | { | |
1204 | unsigned long goal, size, i; | |
1205 | char *ptr; | |
1206 | /* Created by linker magic */ | |
1207 | extern char __per_cpu_start[], __per_cpu_end[]; | |
1208 | ||
1209 | /* Copy section for each CPU (we discard the original) */ | |
1210 | goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE); | |
1211 | ||
1212 | #ifdef CONFIG_MODULES | |
1213 | if (goal < PERCPU_ENOUGH_ROOM) | |
1214 | goal = PERCPU_ENOUGH_ROOM; | |
1215 | #endif | |
1216 | __per_cpu_shift = 0; | |
1217 | for (size = 1UL; size < goal; size <<= 1UL) | |
1218 | __per_cpu_shift++; | |
1219 | ||
1220 | /* Make sure the resulting __per_cpu_base value | |
1221 | * will fit in the 43-bit sign extended IMMU | |
1222 | * TSB register. | |
1223 | */ | |
1224 | ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE, | |
1225 | (unsigned long) __per_cpu_start); | |
1226 | ||
1227 | __per_cpu_base = ptr - __per_cpu_start; | |
1228 | ||
1229 | if ((__per_cpu_shift < PAGE_SHIFT) || | |
1230 | (__per_cpu_base & ~PAGE_MASK) || | |
1231 | (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) { | |
1232 | prom_printf("PER_CPU: Invalid layout, " | |
1233 | "ptr[%p] shift[%lx] base[%lx]\n", | |
1234 | ptr, __per_cpu_shift, __per_cpu_base); | |
1235 | prom_halt(); | |
1236 | } | |
1237 | ||
1238 | for (i = 0; i < NR_CPUS; i++, ptr += size) | |
1239 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | |
1240 | ||
1241 | /* Finally, load in the boot cpu's base value. | |
1242 | * We abuse the IMMU TSB register for trap handler | |
1243 | * entry and exit loading of %g5. That is why it | |
1244 | * has to be page aligned. | |
1245 | */ | |
1246 | cpu_setup_percpu_base(hard_smp_processor_id()); | |
1247 | }
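
The base/shift pair validated above is what cpu_setup_percpu_base() loads into %g5 via the per-cpu offset. In the sparc64 headers of this era the accessor is essentially the expression below (reproduced from memory, so treat the exact form as approximate):

```c
/* Each cpu's copy of the per-cpu section lives at base + (cpu << shift);
 * rounding the section size up to a power of two in
 * setup_per_cpu_areas() is what makes the shift encoding possible.
 */
#define __per_cpu_offset(__cpu) \
	(__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
```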
1247 | } |