[SPARC64]: Make core and sibling groups equal on UltraSPARC-IV.
[linux-2.6-block.git] arch/sparc64/kernel/smp.c
/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>

extern void calibrate_delay(void);

/* Please don't make this stuff initdata!!!  --DaveM */
unsigned char boot_cpu_id;

cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n"
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).udelay_val / (500000/HZ),
			   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

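/* Entry point for a freshly started secondary cpu, reached via the
 * sparc64_cpu_startup entry passed to the firmware below.  It installs
 * the per-cpu offset and timer, calibrates the delay loop, raises
 * callin_flag so the master knows it is alive, then spins until the
 * master adds it to smp_commenced_mask before marking itself online.
 */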
void __init smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	calibrate_delay();
	cpu_data(cpuid).udelay_val = loops_per_jiffy;
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	cpu_set(cpuid, cpu_online_map);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

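/* Slave side of one tick-sync exchange: run NUM_ITERS ping-pongs with
 * the master through the shared go[] words, keep the sample with the
 * smallest round trip, and return the estimated offset of the local
 * tick midpoint from the master's timestamp (the amount to subtract).
 */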
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

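/* Slave side of the full synchronization, entered via the
 * xcall_sync_tick cross call: iterate NUM_ROUNDS times, nudging the
 * local tick with tick_ops->add_tick() until get_delta() reports a
 * zero offset, then print the final error bound.
 */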
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles,"
	       " maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret;

	p = fork_idle(cpu);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);
	cpu_set(cpu, cpu_callout_map);

	if (tlb_type == hypervisor) {
		/* Alloc the mondo queues, cpu will load them. */
		sun4v_init_mondo_queues(0, cpu, 1, 0);

		prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->node, entry, cookie);
	}

	for (timeout = 0; timeout < 5000000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		cpu_clear(cpu, cpu_callout_map);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	return ret;
}

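/* Deliver one 3-word cross-call mondo to a single cpu by writing the
 * UDB interrupt dispatch registers (ASI_INTR_W) and then polling
 * ASI_INTR_DISPATCH_STAT, retrying for as long as the target NACKs.
 */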
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}

/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
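/* The dispatch status register only tracks busy/nack bit pairs for 32
 * outstanding dispatches, so the loop below sends to at most 32 cpus
 * per pass and uses need_more to go around again for the remainder.
 */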
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jbus, need_more;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (!is_jbus)
				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, cnt = 0;
					for_each_cpu_mask(i, mask) {
						cpu_clear(i, mask);
						cnt++;
						if (cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jbus)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

/* Multi-cpu list version. */
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	struct trap_per_cpu *tb;
	u16 *cpu_list;
	u64 *mondo;
	cpumask_t error_mask;
	unsigned long flags, status;
	int cnt, retries, this_cpu, prev_sent, i;

	if (cpus_empty(mask))
		return;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list. */
	cnt = 0;
	for_each_cpu_mask(i, mask)
		cpu_list[cnt++] = i;

	cpus_clear(error_mask);
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done. */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err >= 0 &&
				    err == HV_CPU_STATE_ERROR) {
					cpu_list[i] = 0xffff;
					cpu_set(cpu, error_mask);
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	local_irq_restore(flags);

	if (unlikely(!cpus_empty(error_mask)))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "were in error state\n",
	       this_cpu);
	printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
	for_each_cpu_mask(i, error_mask)
		printk("%d ", i);
	printk("]\n");
	return;

fatal_mondo_timeout:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       "progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_xcall_deliver(data0, data1, data2, mask);
	else
		hypervisor_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

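/* smp_call_function() plumbing: the initiator publishes call_data under
 * call_lock and fires the xcall_call_function cross call; each target
 * runs smp_call_function_client(), which invokes the function and bumps
 * the finished count (before or after running it, depending on wait).
 */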
struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int smp_call_function_mask(void (*func)(void *info), void *info,
				  int nonatomic, int wait, cpumask_t mask)
{
	struct call_data_struct data;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	cpu_clear(smp_processor_id(), mask);
	cpus = cpus_weight(mask);
	if (!cpus)
		goto out_unlock;

	call_data = &data;
	mb();

	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

	/* Wait for response */
	while (atomic_read(&data.finished) != cpus)
		cpu_relax();

out_unlock:
	spin_unlock(&call_lock);

	return 0;
}

int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	return smp_call_function_mask(func, info, nonatomic, wait,
				      cpu_online_map);
}

void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static __inline__ void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

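/* Flush the D-cache copy of a page on one particular cpu, either
 * locally or by aiming a spitfire/cheetah flush xcall at it.  Nothing
 * to do on sun4v (tlb_type == hypervisor), so that case returns early.
 */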
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}

static void __smp_receive_signal_mask(cpumask_t mask)
{
	smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
}

void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu))
		__smp_receive_signal_mask(mask);
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
}

void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

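/* CPU capture protocol: the first caller of smp_capture() cross calls
 * every other cpu into smp_penguin_jailcell(), where they sit in the
 * PROM world until the matching smp_release().  Used when the kernel
 * needs the other cpus quiesced, e.g. before handing control to the
 * firmware.
 */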
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar_storestore_loadstore();
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_storeload_storestore();
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_storeload_storestore();
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

void __init smp_tick_init(void)
{
	boot_cpu_id = hard_smp_processor_id();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

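/* Derive max_cache_size for the scheduler from the smallest external
 * cache reported by any cpu, clamped to a 256KB minimum.
 */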
static void __init smp_tune_scheduling(void)
{
	unsigned int smallest = ~0U;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		unsigned int val = cpu_data(i).ecache_size;

		if (val && val < smallest)
			smallest = val;
	}

	/* Any value less than 256K is nonsense. */
	if (smallest < (256U * 1024U))
		smallest = 256 * 1024;

	max_cache_size = smallest;

	if (smallest < 1U * 1024U * 1024U)
		printk(KERN_INFO "Using max_cache_size of %uKB\n",
		       smallest / 1024U);
	else
		printk(KERN_INFO "Using max_cache_size of %uMB\n",
		       smallest / 1024U / 1024U);
}

/* Constrain the number of cpus to max_cpus. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	if (num_possible_cpus() > max_cpus) {
		for_each_possible_cpu(i) {
			if (i != boot_cpu_id) {
				cpu_clear(i, phys_cpu_present_map);
				cpu_clear(i, cpu_present_map);
				if (num_possible_cpus() <= max_cpus)
					break;
			}
		}
	}

	cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy;
	smp_tune_scheduling();
}

void __devinit smp_prepare_boot_cpu(void)
{
}

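/* Build the topology masks: cpu_core_map[i] collects all cpus sharing
 * cpu i's core_id (a zero core_id means no information, so only the
 * cpu itself is set), and cpu_sibling_map[i] collects all hardware
 * threads sharing cpu i's proc_id (-1 likewise means only the cpu
 * itself).
 */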
void __devinit smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		unsigned int j;

		if (cpu_data(i).core_id == 0) {
			cpu_set(i, cpu_core_map[i]);
			continue;
		}

		for_each_possible_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpu_set(j, cpu_core_map[i]);
		}
	}

	for_each_possible_cpu(i) {
		unsigned int j;

		if (cpu_data(i).proc_id == -1) {
			cpu_set(i, cpu_sibling_map[i]);
			continue;
		}

		for_each_possible_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpu_set(j, cpu_sibling_map[i]);
		}
	}
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int i;

	for_each_online_cpu(i)
		bogosum += cpu_data(i).udelay_val;
	printk("Total of %ld processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       (long) num_online_cpus(),
	       bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);
}

void smp_send_reschedule(int cpu)
{
	smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);

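/* Carve out one copy of the per-cpu data section (__per_cpu_start ..
 * __per_cpu_end) for every possible cpu from bootmem.  The per-cpu
 * size is rounded up to a power of two so that a cpu's offset can
 * presumably be formed as __per_cpu_base + (cpu << __per_cpu_shift);
 * the boot cpu's %g5-based offset is installed at the end.
 */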
void __init real_setup_per_cpu_areas(void)
{
	unsigned long goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = PERCPU_ENOUGH_ROOM;

	__per_cpu_shift = PAGE_SHIFT;
	for (size = PAGE_SIZE; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	ptr = alloc_bootmem_pages(size * NR_CPUS);

	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

	/* Setup %g5 for the boot cpu. */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}