// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>
#include <linux/random.h>
#include <linux/stackprotector.h>
#include <linux/pgtable.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct task_struct *secondary_current;
bool has_big_cores;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);

#define MAX_THREAD_LIST_SIZE 8
#define THREAD_GROUP_SHARE_L1 1
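/*
 * Parsed form of the "ibm,thread-groups" device tree property for one
 * CPU node (see parse_thread_groups() below): which property the
 * groups share, how many groups there are, the number of threads per
 * group, and the flattened list of member threads given as
 * "ibm,ppc-interrupt-server#s" values.
 */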
struct thread_groups {
	unsigned int property;
	unsigned int nr_groups;
	unsigned int threads_per_group;
	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
};

/*
 * On big-core systems, cpu_l1_cache_map for each CPU corresponds to
 * the set of its siblings that share the L1 cache.
 */
DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}


#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca_ptrs[nr]->cpu_start) {
		paca_ptrs[nr]->cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	timer_broadcast_interrupt();
	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = "nmi ipi",
#endif
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

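/*
 * With muxed IPIs a single interrupt multiplexes all message types:
 * each message gets one byte of the per-cpu 'messages' word. A sender
 * stores 1 into the target's byte; the receiver xchg()s the whole word
 * to zero in smp_ipi_demux_relaxed() and handles every byte found set.
 */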
void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif

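/*
 * IPI_MESSAGE(A) is the bit tested by the demux loop below: the lowest
 * bit of the byte that smp_muxed_ipi_set_message() writes for message
 * A. For example (assuming PPC_MSG_RESCHEDULE == 1), on 64-bit
 * little-endian it is 1UL << 8, i.e. byte 1 of the 'messages' word; on
 * big-endian the shift is mirrored so the same byte is selected.
 */
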
irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			timer_broadcast_interrupt();
#endif
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

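/*
 * Hand one message to a CPU: prefer the interrupt controller's
 * dedicated message_pass hook if it provides one, otherwise fall back
 * to the muxed-IPI scheme above.
 */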
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 * have returned from their handlers, so there is no guarantee about
 * concurrency or re-entrancy.
 *
 * A new NMI can be issued before all targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

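/*
 * __nmi_ipi_lock is a hand-rolled spinlock built on a bare atomic,
 * presumably so it can be taken from the NMI path itself
 * (smp_handle_nmi_ipi() runs from the platform NMI handler) without
 * involving the generic spinlock machinery. nmi_ipi_lock_start() also
 * hard-disables interrupts while spinning for it.
 */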
static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static void nmi_ipi_lock(void)
{
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
}

static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
	atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *) = NULL;
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
		fn = READ_ONCE(nmi_ipi_function);
		WARN_ON_ONCE(!fn);
		ret = 1;
	}
	nmi_ipi_unlock_end(&flags);

	if (fn)
		fn(regs);

	return ret;
}

static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   begin executing the handler, == 0 specifies indefinite delay.
 */
static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
			      u64 delay_us, bool safe)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy) {
		nmi_ipi_unlock_end(&flags);
		spin_until_cond(!nmi_ipi_busy);
		nmi_ipi_lock_start(&flags);
	}
	nmi_ipi_busy = true;
	nmi_ipi_function = fn;

	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}

	nmi_ipi_unlock();

	/* Interrupts remain hard disabled */

	do_smp_send_nmi_ipi(cpu, safe);

	nmi_ipi_lock();
	/* nmi_ipi_busy is set here, so unlock/lock is okay */
	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		nmi_ipi_unlock();
		udelay(1);
		nmi_ipi_lock();
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}

	nmi_ipi_function = NULL;
	nmi_ipi_busy = false;

	nmi_ipi_unlock_end(&flags);

	return ret;
}

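/*
 * smp_send_nmi_ipi() may use the platform's true NMI mechanism via
 * smp_ops->cause_nmi_ipi(); smp_send_safe_nmi_ipi() passes safe=true
 * so do_smp_send_nmi_ipi() above skips cause_nmi_ipi() and uses the
 * ordinary (maskable) IPI path instead.
 */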
int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
}

int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
}
#endif /* CONFIG_NMI_IPI */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	int cpu;

	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
	if (kdump_in_progress() && crash_wake_offline) {
		for_each_present_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			/*
			 * crash_ipi_callback will wait for
			 * all cpus, including offline CPUs.
			 * We don't care about nmi_ipi_function.
			 * Offline cpus will jump straight into
			 * crash_ipi_callback, we can skip the
			 * entire NMI dance and waiting for
			 * cpus to clear pending mask, etc.
			 */
			do_smp_send_nmi_ipi(cpu, false);
		}
	}
}
#endif

#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
	/*
	 * IRQs are already hard disabled by smp_handle_nmi_ipi().
	 */
	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}

#else /* CONFIG_NMI_IPI */

static void stop_this_cpu(void *dummy)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	static bool stopped = false;

	/*
	 * Prevent waiting on csd lock from a previous smp_send_stop.
	 * This is racy, but in general callers try to do the right
	 * thing and only fire off one smp_send_stop (e.g., see
	 * kernel/panic.c)
	 */
	if (stopped)
		return;

	stopped = true;

	smp_call_function(stop_this_cpu, NULL, 0);
}
#endif /* CONFIG_NMI_IPI */

struct task_struct *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

/*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
 * rather than just passing around the cpumask we pass around a function that
 * returns the cpumask for the given CPU.
 */
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
{
	cpumask_set_cpu(i, get_cpumask(j));
	cpumask_set_cpu(j, get_cpumask(i));
}

#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
			       struct cpumask *(*get_cpumask)(int))
{
	cpumask_clear_cpu(i, get_cpumask(j));
	cpumask_clear_cpu(j, get_cpumask(i));
}
#endif

/*
 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 *                      property for the CPU device node @dn and stores
 *                      the parsed output in the thread_groups
 *                      structure @tg if the ibm,thread-groups[0]
 *                      matches @property.
 *
 * @dn: The device node of the CPU device.
 * @tg: Pointer to a thread group structure into which the parsed
 *      output of "ibm,thread-groups" is stored.
 * @property: The property of the thread-group that the caller is
 *            interested in.
 *
 * The ibm,thread-groups[0..N-1] array defines which group of threads
 * in the CPU-device node can be grouped together based on the property.
 *
 * ibm,thread-groups[0] tells us the property based on which the
 * threads are being grouped together. If this value is 1, it implies
 * that the threads in the same group share the L1 and translation
 * cache.
 *
 * ibm,thread-groups[1] tells us how many such thread groups exist.
 *
 * ibm,thread-groups[2] tells us the number of threads in each such
 * group.
 *
 * ibm,thread-groups[3..N-1] is the list of threads identified by
 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 * the grouping.
 *
 * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
 * implies that there are 2 groups of 4 threads each, where each group
 * of threads shares the L1 and translation cache.
 *
 * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
 * and the "ibm,ppc-interrupt-server#s" of the second group is
 * {9, 10, 11, 12}.
 *
 * Returns 0 on success, -EINVAL if the property does not exist,
 * -ENODATA if property does not have a value, and -EOVERFLOW if the
 * property data isn't large enough.
 */
static int parse_thread_groups(struct device_node *dn,
			       struct thread_groups *tg,
			       unsigned int property)
{
	int i;
	u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
	u32 *thread_list;
	size_t total_threads;
	int ret;

	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
					 thread_group_array, 3);
	if (ret)
		return ret;

	tg->property = thread_group_array[0];
	tg->nr_groups = thread_group_array[1];
	tg->threads_per_group = thread_group_array[2];
	if (tg->property != property ||
	    tg->nr_groups < 1 ||
	    tg->threads_per_group < 1)
		return -ENODATA;

	total_threads = tg->nr_groups * tg->threads_per_group;

	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
					 thread_group_array,
					 3 + total_threads);
	if (ret)
		return ret;

	thread_list = &thread_group_array[3];

	for (i = 0 ; i < total_threads; i++)
		tg->thread_list[i] = thread_list[i];

	return 0;
}

/*
 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 *                              that @cpu belongs to.
 *
 * @cpu : The logical CPU whose thread group is being searched.
 * @tg : The thread-group structure of the CPU node which @cpu belongs
 *       to.
 *
 * Returns the index to tg->thread_list that points to the start
 * of the thread_group that @cpu belongs to.
 *
 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 * tg->thread_list.
 */
static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
{
	int hw_cpu_id = get_hard_smp_processor_id(cpu);
	int i, j;

	for (i = 0; i < tg->nr_groups; i++) {
		int group_start = i * tg->threads_per_group;

		for (j = 0; j < tg->threads_per_group; j++) {
			int idx = group_start + j;

			if (tg->thread_list[idx] == hw_cpu_id)
				return group_start;
		}
	}

	return -1;
}

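/*
 * Parse "ibm,thread-groups" for @cpu's node and fill cpu_l1_cache_map
 * with the threads of this core that sit in the same
 * THREAD_GROUP_SHARE_L1 group as @cpu. Returns non-zero if the CPU
 * node is missing or does not describe such groups.
 */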
static int init_cpu_l1_cache_map(int cpu)
{
	struct device_node *dn = of_get_cpu_node(cpu, NULL);
	struct thread_groups tg = {.property = 0,
				   .nr_groups = 0,
				   .threads_per_group = 0};
	int first_thread = cpu_first_thread_sibling(cpu);
	int i, cpu_group_start = -1, err = 0;

	if (!dn)
		return -ENODATA;

	err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
	if (err)
		goto out;

	zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
				GFP_KERNEL,
				cpu_to_node(cpu));

	cpu_group_start = get_cpu_thread_group_start(cpu, &tg);

	if (unlikely(cpu_group_start == -1)) {
		WARN_ON_ONCE(1);
		err = -ENODATA;
		goto out;
	}

	for (i = first_thread; i < first_thread + threads_per_core; i++) {
		int i_group_start = get_cpu_thread_group_start(i, &tg);

		if (unlikely(i_group_start == -1)) {
			WARN_ON_ONCE(1);
			err = -ENODATA;
			goto out;
		}

		if (i_group_start == cpu_group_start)
			cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
	}

out:
	of_node_put(dn);
	return err;
}

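/*
 * A "big core" here is one whose device tree exposes
 * THREAD_GROUP_SHARE_L1 thread groups (on POWER9, an SMT8 core made of
 * two SMT4 "small" cores with separate L1 caches). has_big_cores is
 * only set once every possible CPU has provided this information,
 * since any parse failure aborts the scan.
 */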
static int init_big_cores(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int err = init_cpu_l1_cache_map(cpu);

		if (err)
			return err;

		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
					GFP_KERNEL,
					cpu_to_node(cpu));
	}

	has_big_cores = true;
	return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	/* Init the cpumasks so the boot CPU is related to itself */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	init_big_cores();
	if (has_big_cores) {
		cpumask_set_cpu(boot_cpuid,
				cpu_smallcore_mask(boot_cpuid));
	}

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca_ptrs[boot_cpuid]->__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = current;
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt to
	 * be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

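/*
 * Wait for the dying CPU to mark itself CPU_DEAD (via
 * generic_set_cpu_dead()), polling for up to 100 * 100ms, i.e. about
 * ten seconds, before complaining.
 */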
void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

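/*
 * Point the target CPU at its idle task: on PPC64 set the paca's
 * current pointer and initial kernel stack; secondary_current and
 * current_set[] record the task for the secondary bringup path.
 */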
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
#ifdef CONFIG_PPC64
	paca_ptrs[cpu]->__current = idle;
	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
				 THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	idle->cpu = cpu;
	secondary_current = current_set[cpu] = idle;
}

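/*
 * Arch hook called by the CPU hotplug core: install the idle thread,
 * kick the CPU through smp_ops, then wait for it to check in via
 * cpu_callin_map and finally show up in the online mask.
 */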
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure callin-map entry is 0 (can be left over from a
	 * CPU hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

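/*
 * Mark @cpu related (via @mask_fn) to every online CPU whose L2
 * resolves to the same device tree cache node. Returns false if no L2
 * cache node can be found for @cpu.
 */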
static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
{
	struct device_node *l2_cache, *np;
	int i;

	l2_cache = cpu_to_l2cache(cpu);
	if (!l2_cache)
		return false;

	for_each_cpu(i, cpu_online_mask) {
		/*
		 * When updating the masks, the current CPU has not yet been
		 * marked online, but we still need to update its cache masks.
		 */
		np = cpu_to_l2cache(i);
		if (!np)
			continue;

		if (np == l2_cache)
			set_cpus_related(cpu, i, mask_fn);

		of_node_put(np);
	}
	of_node_put(l2_cache);

	return true;
}

#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
{
	int i;

	/* NB: cpu_core_mask is a superset of the others */
	for_each_cpu(i, cpu_core_mask(cpu)) {
		set_cpus_unrelated(cpu, i, cpu_core_mask);
		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
		if (has_big_cores)
			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
	}
}
#endif

static inline void add_cpu_to_smallcore_masks(int cpu)
{
	struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
	int i, first_thread = cpu_first_thread_sibling(cpu);

	if (!has_big_cores)
		return;

	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++) {
		if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map))
			set_cpus_related(i, cpu, cpu_smallcore_mask);
	}
}

int get_physical_package_id(int cpu)
{
	int pkg_id = cpu_to_chip_id(cpu);

	/*
	 * If the platform is PowerNV or Guest on KVM, ibm,chip-id is
	 * defined. Hence we would return the chip-id as the result of
	 * get_physical_package_id.
	 */
	if (pkg_id == -1 && firmware_has_feature(FW_FEATURE_LPAR) &&
	    IS_ENABLED(CONFIG_PPC_SPLPAR)) {
		struct device_node *np = of_get_cpu_node(cpu, NULL);
		pkg_id = of_node_to_nid(np);
		of_node_put(np);
	}

	return pkg_id;
}
EXPORT_SYMBOL_GPL(get_physical_package_id);

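/*
 * Build up this CPU's topology masks from the smallest unit outward:
 * thread siblings first, then small-core (shared L1) siblings, then
 * L2-cache siblings, and finally all CPUs in the same package.
 */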
static void add_cpu_to_masks(int cpu)
{
	int first_thread = cpu_first_thread_sibling(cpu);
	int pkg_id = get_physical_package_id(cpu);
	int i;

	/*
	 * This CPU will not be in the online mask yet so we need to manually
	 * add it to its own thread sibling mask.
	 */
	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++)
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_sibling_mask);

	add_cpu_to_smallcore_masks(cpu);
	/*
	 * Copy the thread sibling mask into the cache sibling mask
	 * and mark any CPUs that share an L2 with this CPU.
	 */
	for_each_cpu(i, cpu_sibling_mask(cpu))
		set_cpus_related(cpu, i, cpu_l2_cache_mask);
	update_mask_by_l2(cpu, cpu_l2_cache_mask);

	/*
	 * Copy the cache sibling mask into core sibling mask and mark
	 * any CPUs on the same chip as this CPU.
	 */
	for_each_cpu(i, cpu_l2_cache_mask(cpu))
		set_cpus_related(cpu, i, cpu_core_mask);

	if (pkg_id == -1)
		return;

	for_each_cpu(i, cpu_online_mask)
		if (get_physical_package_id(i) == pkg_id)
			set_cpus_related(cpu, i, cpu_core_mask);
}

static bool shared_caches;

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update topology CPU masks */
	add_cpu_to_masks(cpu);

	if (has_big_cores)
		sibling_mask = cpu_smallcore_mask;
	/*
	 * Check for any shared caches. Note that this must be done on a
	 * per-core basis because one core in the pair might be disabled.
	 */
	if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
		shared_caches = true;

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	boot_init_stack_canary();

	local_irq_enable();

	/* We can enable ftrace for secondary cpus now */
	this_cpu_enable_ftrace();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

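/*
 * Default scheduler topology: an SMT level spanning the threads of a
 * core, then DIE spanning everything else. power9_topology below
 * inserts an extra CACHE level when cores share an L2.
 */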
static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/*
 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
 * since the migrated task remains cache hot. We want to take advantage of this
 * at the scheduler level so an extra topology level is required.
 */
static int powerpc_shared_cache_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}

/*
 * We can't just pass cpu_l2_cache_mask() directly because it returns
 * a non-const pointer and the compiler barfs on that.
 */
static const struct cpumask *shared_cache_mask(int cpu)
{
	return cpu_l2_cache_mask(cpu);
}

#ifdef CONFIG_SCHED_SMT
static const struct cpumask *smallcore_smt_mask(int cpu)
{
	return cpu_smallcore_mask(cpu);
}
#endif

static struct sched_domain_topology_level power9_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We are running pinned to the boot CPU, see rest_init().
	 */
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

#ifdef CONFIG_SCHED_SMT
	if (has_big_cores) {
		pr_info("Big cores detected but using small core scheduling\n");
		power9_topology[0].mask = smallcore_smt_mask;
		powerpc_topology[0].mask = smallcore_smt_mask;
	}
#endif
	/*
	 * If any CPU detects that it's sharing a cache with another CPU then
	 * use the deeper topology that is aware of this sharing.
	 */
	if (shared_caches) {
		pr_info("Using shared cache scheduler topology\n");
		set_sched_topology(power9_topology);
	} else {
		pr_info("Using standard scheduler topology\n");
		set_sched_topology(powerpc_topology);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	this_cpu_disable_ftrace();

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	remove_cpu_from_masks(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	/*
	 * Disable on the down path. This will be re-enabled by
	 * start_secondary() via start_secondary_resume() below
	 */
	this_cpu_disable_ftrace();

	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif