powerpc/topology: Update topology_core_cpumask
arch/powerpc/kernel/smp.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>
#include <linux/random.h>
#include <linux/stackprotector.h>
#include <linux/pgtable.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>
#include <asm/kup.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct task_struct *secondary_current;
bool has_big_cores;
bool coregroup_enabled;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);

enum {
#ifdef CONFIG_SCHED_SMT
	smt_idx,
#endif
	cache_idx,
	mc_idx,
	die_idx,
};

#define MAX_THREAD_LIST_SIZE 8
#define THREAD_GROUP_SHARE_L1 1
struct thread_groups {
	unsigned int property;
	unsigned int nr_groups;
	unsigned int threads_per_group;
	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
};

/*
 * On big-core systems, cpu_l1_cache_map for each CPU corresponds to
 * the set of its siblings that share the L1 cache.
 */
DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca_ptrs[nr]->cpu_start) {
		paca_ptrs[nr]->cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	timer_broadcast_interrupt();
	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = "nmi ipi",
#endif
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}

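/*
 * Usage sketch (illustrative, not from this file): an interrupt
 * controller driver with one hardware IPI per message could register
 * them all as
 *
 *	for (msg = PPC_MSG_CALL_FUNCTION; msg <= PPC_MSG_NMI_IPI; msg++)
 *		smp_request_message_ipi(my_ipi_virq[msg], msg);
 *
 * where my_ipi_virq[] is a hypothetical per-driver table mapping each
 * message to the Linux irq number of its IPI line.
 */
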
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif

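/*
 * Worked example (for illustration): smp_muxed_ipi_set_message() stores
 * a 1 into byte @msg of info->messages, and IPI_MESSAGE(@msg) is the
 * matching bit within the long. On little-endian, msg 1
 * (PPC_MSG_RESCHEDULE) gives 1uL << 8, i.e. the low bit of byte 1; the
 * big-endian variant mirrors the shift so the same byte-store/bit-test
 * pairing holds there too.
 */
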
irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			timer_broadcast_interrupt();
#endif
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

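/*
 * Design note on the muxed-IPI demux above (illustrative): the xchg()
 * snapshots and clears all message bytes atomically, so a concurrent
 * smp_muxed_ipi_set_message() either lands in the snapshot just taken
 * or leaves info->messages non-zero, in which case the do/while loop
 * runs another pass. Either way no message is dropped.
 */
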
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 * have returned from their handlers, so there is no guarantee about
 * concurrency or re-entrancy.
 *
 * A new NMI can be issued before all targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static void nmi_ipi_lock(void)
{
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
}

static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
	atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *) = NULL;
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
		fn = READ_ONCE(nmi_ipi_function);
		WARN_ON_ONCE(!fn);
		ret = 1;
	}
	nmi_ipi_unlock_end(&flags);

	if (fn)
		fn(regs);

	return ret;
}

static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   begin executing the handler, == 0 specifies indefinite delay.
 */
static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
			      u64 delay_us, bool safe)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy) {
		nmi_ipi_unlock_end(&flags);
		spin_until_cond(!nmi_ipi_busy);
		nmi_ipi_lock_start(&flags);
	}
	nmi_ipi_busy = true;
	nmi_ipi_function = fn;

	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}

	nmi_ipi_unlock();

	/* Interrupts remain hard disabled */

	do_smp_send_nmi_ipi(cpu, safe);

	nmi_ipi_lock();
	/* nmi_ipi_busy is set here, so unlock/lock is okay */
	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		nmi_ipi_unlock();
		udelay(1);
		nmi_ipi_lock();
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}

	nmi_ipi_function = NULL;
	nmi_ipi_busy = false;

	nmi_ipi_unlock_end(&flags);

	return ret;
}

int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
}

int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
}
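
/*
 * Usage sketch (mirroring callers later in this file): the debugger and
 * crash paths interrupt all other CPUs with a callback and give them up
 * to one second to respond, e.g.
 *
 *	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
 *
 * The "safe" variant differs only in skipping the platform's
 * cause_nmi_ipi() hook and always falling back to do_message_pass().
 */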
#endif /* CONFIG_NMI_IPI */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	int cpu;

	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
	if (kdump_in_progress() && crash_wake_offline) {
		for_each_present_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			/*
			 * crash_ipi_callback will wait for
			 * all cpus, including offline CPUs.
			 * We don't care about nmi_ipi_function.
			 * Offline cpus will jump straight into
			 * crash_ipi_callback, we can skip the
			 * entire NMI dance and waiting for
			 * cpus to clear pending mask, etc.
			 */
			do_smp_send_nmi_ipi(cpu, false);
		}
	}
}
#endif

#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
	/*
	 * IRQs are already hard disabled by smp_handle_nmi_ipi.
	 */
	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}

#else /* CONFIG_NMI_IPI */

static void stop_this_cpu(void *dummy)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	static bool stopped = false;

	/*
	 * Prevent waiting on csd lock from a previous smp_send_stop.
	 * This is racy, but in general callers try to do the right
	 * thing and only fire off one smp_send_stop (e.g., see
	 * kernel/panic.c)
	 */
	if (stopped)
		return;

	stopped = true;

	smp_call_function(stop_this_cpu, NULL, 0);
}
#endif /* CONFIG_NMI_IPI */

struct task_struct *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

/*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
 * rather than just passing around the cpumask we pass around a function that
 * returns that cpumask for the given CPU.
 */
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
{
	cpumask_set_cpu(i, get_cpumask(j));
	cpumask_set_cpu(j, get_cpumask(i));
}

#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
			       struct cpumask *(*get_cpumask)(int))
{
	cpumask_clear_cpu(i, get_cpumask(j));
	cpumask_clear_cpu(j, get_cpumask(i));
}
#endif

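/*
 * Illustrative example: recording that CPUs 4 and 5 share an L2 cache
 * is then a single call,
 *
 *	set_cpus_related(4, 5, cpu_l2_cache_mask);
 *
 * which sets each CPU in the other's cpu_l2_cache_map; the hotplug path
 * undoes it with set_cpus_unrelated() and the same accessor.
 */
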
/*
 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 *                      property for the CPU device node @dn and stores
 *                      the parsed output in the thread_groups
 *                      structure @tg if the ibm,thread-groups[0]
 *                      matches @property.
 *
 * @dn: The device node of the CPU device.
 * @tg: Pointer to a thread group structure into which the parsed
 *      output of "ibm,thread-groups" is stored.
 * @property: The property of the thread-group that the caller is
 *            interested in.
 *
 * ibm,thread-groups[0..N-1] array defines which group of threads in
 * the CPU-device node can be grouped together based on the property.
 *
 * ibm,thread-groups[0] tells us the property based on which the
 * threads are being grouped together. If this value is 1, it implies
 * that the threads in the same group share the L1 and translation
 * caches.
 *
 * ibm,thread-groups[1] tells us how many such thread groups exist.
 *
 * ibm,thread-groups[2] tells us the number of threads in each such
 * group.
 *
 * ibm,thread-groups[3..N-1] is the list of threads identified by
 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 * the grouping.
 *
 * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
 * implies that there are 2 groups of 4 threads each, where each group
 * of threads shares the L1 and translation caches.
 *
 * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
 * and the "ibm,ppc-interrupt-server#s" of the second group is
 * {9,10,11,12}.
 *
 * Returns 0 on success, -EINVAL if the property does not exist,
 * -ENODATA if property does not have a value, and -EOVERFLOW if the
 * property data isn't large enough.
 */
static int parse_thread_groups(struct device_node *dn,
			       struct thread_groups *tg,
			       unsigned int property)
{
	int i;
	u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
	u32 *thread_list;
	size_t total_threads;
	int ret;

	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
					 thread_group_array, 3);
	if (ret)
		return ret;

	tg->property = thread_group_array[0];
	tg->nr_groups = thread_group_array[1];
	tg->threads_per_group = thread_group_array[2];
	if (tg->property != property ||
	    tg->nr_groups < 1 ||
	    tg->threads_per_group < 1)
		return -ENODATA;

	total_threads = tg->nr_groups * tg->threads_per_group;

	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
					 thread_group_array,
					 3 + total_threads);
	if (ret)
		return ret;

	thread_list = &thread_group_array[3];

	for (i = 0; i < total_threads; i++)
		tg->thread_list[i] = thread_list[i];

	return 0;
}

/*
 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 *                              that @cpu belongs to.
 *
 * @cpu : The logical CPU whose thread group is being searched.
 * @tg : The thread-group structure of the CPU node which @cpu belongs
 *       to.
 *
 * Returns the index to tg->thread_list that points to the start
 * of the thread_group that @cpu belongs to.
 *
 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 * tg->thread_list.
 */
static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
{
	int hw_cpu_id = get_hard_smp_processor_id(cpu);
	int i, j;

	for (i = 0; i < tg->nr_groups; i++) {
		int group_start = i * tg->threads_per_group;

		for (j = 0; j < tg->threads_per_group; j++) {
			int idx = group_start + j;

			if (tg->thread_list[idx] == hw_cpu_id)
				return group_start;
		}
	}

	return -1;
}

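/*
 * Worked example (reusing the sample property from the
 * parse_thread_groups() comment above): with thread_list =
 * {5,6,7,8,9,10,11,12} and threads_per_group = 4, a CPU whose hardware
 * id is 10 is found at index 5, inside the second group, so
 * get_cpu_thread_group_start() returns 4, the offset of {9,10,11,12}
 * within thread_list.
 */
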
static int init_cpu_l1_cache_map(int cpu)
{
	struct device_node *dn = of_get_cpu_node(cpu, NULL);
	struct thread_groups tg = {.property = 0,
				   .nr_groups = 0,
				   .threads_per_group = 0};
	int first_thread = cpu_first_thread_sibling(cpu);
	int i, cpu_group_start = -1, err = 0;

	if (!dn)
		return -ENODATA;

	err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
	if (err)
		goto out;

	cpu_group_start = get_cpu_thread_group_start(cpu, &tg);

	if (unlikely(cpu_group_start == -1)) {
		WARN_ON_ONCE(1);
		err = -ENODATA;
		goto out;
	}

	zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
				GFP_KERNEL, cpu_to_node(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++) {
		int i_group_start = get_cpu_thread_group_start(i, &tg);

		if (unlikely(i_group_start == -1)) {
			WARN_ON_ONCE(1);
			err = -ENODATA;
			goto out;
		}

		if (i_group_start == cpu_group_start)
			cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
	}

out:
	of_node_put(dn);
	return err;
}

static bool shared_caches;

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

/*
 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
 * since the migrated task remains cache hot. We want to take advantage of this
 * at the scheduler level so an extra topology level is required.
 */
static int powerpc_shared_cache_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}

/*
 * We can't just pass cpu_l2_cache_mask() directly because it returns a
 * non-const pointer and the compiler barfs on that.
 */
static const struct cpumask *shared_cache_mask(int cpu)
{
	return per_cpu(cpu_l2_cache_map, cpu);
}

#ifdef CONFIG_SCHED_SMT
static const struct cpumask *smallcore_smt_mask(int cpu)
{
	return cpu_smallcore_mask(cpu);
}
#endif

static struct cpumask *cpu_coregroup_mask(int cpu)
{
	return per_cpu(cpu_coregroup_map, cpu);
}

static bool has_coregroup_support(void)
{
	return coregroup_enabled;
}

static const struct cpumask *cpu_mc_mask(int cpu)
{
	return cpu_coregroup_mask(cpu);
}

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
	{ cpu_mc_mask, SD_INIT_NAME(MC) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

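/*
 * Note: the order of these entries must match the smt_idx/cache_idx/
 * mc_idx/die_idx enum near the top of this file, since fixup_topology()
 * below indexes powerpc_topology[] with those constants to swap in
 * smallcore_smt_mask() or to collapse the MC level.
 */
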
static int init_big_cores(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int err = init_cpu_l1_cache_map(cpu);

		if (err)
			return err;

		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
					GFP_KERNEL,
					cpu_to_node(cpu));
	}

	has_big_cores = true;
	return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		if (has_coregroup_support())
			zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
						GFP_KERNEL, cpu_to_node(cpu));

#ifdef CONFIG_NEED_MULTIPLE_NODES
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
#endif
	}

	/* Init the cpumasks so the boot CPU is related to itself */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (has_coregroup_support())
		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));

	init_big_cores();
	if (has_big_cores) {
		cpumask_set_cpu(boot_cpuid,
				cpu_smallcore_mask(boot_cpuid));
	}

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca_ptrs[boot_cpuid]->__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = current;
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt to
	 * be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
#ifdef CONFIG_PPC64
	paca_ptrs[cpu]->__current = idle;
	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
				 THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	idle->cpu = cpu;
	secondary_current = current_set[cpu] = idle;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure callin-map entry is 0 (can be left over from a
	 * CPU hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

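/*
 * Worked example (for illustration): on an SMT8 system threads_shift is
 * 3, so logical CPU 19 belongs to core 19 >> 3 == 2, and the first
 * thread of core 2 is 2 << 3 == CPU 16.
 */
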
/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
{
	struct device_node *l2_cache, *np;
	int i;

	l2_cache = cpu_to_l2cache(cpu);
	if (!l2_cache) {
		struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;

		/*
		 * If there is no L2 cache for this CPU, assume all its
		 * siblings share a cache with it.
		 */
		if (has_big_cores)
			sibling_mask = cpu_smallcore_mask;

		for_each_cpu(i, sibling_mask(cpu))
			set_cpus_related(cpu, i, cpu_l2_cache_mask);

		return false;
	}

	cpumask_set_cpu(cpu, mask_fn(cpu));
	for_each_cpu(i, cpu_online_mask) {
		/*
		 * When updating the masks, the current CPU has not yet
		 * been marked online, but we still need to update its
		 * cache masks.
		 */
		np = cpu_to_l2cache(i);
		if (!np)
			continue;

		if (np == l2_cache)
			set_cpus_related(cpu, i, mask_fn);

		of_node_put(np);
	}
	of_node_put(l2_cache);

	return true;
}

#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
{
	int i;

	/* NB: cpu_core_mask is a superset of the others */
	for_each_cpu(i, cpu_core_mask(cpu)) {
		set_cpus_unrelated(cpu, i, cpu_core_mask);
		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
		if (has_big_cores)
			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
		if (has_coregroup_support())
			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
	}
}
#endif

static inline void add_cpu_to_smallcore_masks(int cpu)
{
	struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
	int i, first_thread = cpu_first_thread_sibling(cpu);

	if (!has_big_cores)
		return;

	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++) {
		if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map))
			set_cpus_related(i, cpu, cpu_smallcore_mask);
	}
}

int get_physical_package_id(int cpu)
{
	int pkg_id = cpu_to_chip_id(cpu);

	/*
	 * If the platform is PowerNV or Guest on KVM, ibm,chip-id is
	 * defined. Hence we would return the chip-id as the result of
	 * get_physical_package_id.
	 */
	if (pkg_id == -1 && firmware_has_feature(FW_FEATURE_LPAR) &&
	    IS_ENABLED(CONFIG_PPC_SPLPAR)) {
		struct device_node *np = of_get_cpu_node(cpu, NULL);
		pkg_id = of_node_to_nid(np);
		of_node_put(np);
	}

	return pkg_id;
}
EXPORT_SYMBOL_GPL(get_physical_package_id);

static void add_cpu_to_masks(int cpu)
{
	int first_thread = cpu_first_thread_sibling(cpu);
	int pkg_id = get_physical_package_id(cpu);
	int i;

	/*
	 * This CPU will not be in the online mask yet so we need to manually
	 * add it to its own thread sibling mask.
	 */
	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
	cpumask_set_cpu(cpu, cpu_core_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++)
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_sibling_mask);

	add_cpu_to_smallcore_masks(cpu);
	update_mask_by_l2(cpu, cpu_l2_cache_mask);

	if (has_coregroup_support()) {
		int coregroup_id = cpu_to_coregroup_id(cpu);

		cpumask_set_cpu(cpu, cpu_coregroup_mask(cpu));
		for_each_cpu_and(i, cpu_online_mask, cpu_cpu_mask(cpu)) {
			int fcpu = cpu_first_thread_sibling(i);

			if (fcpu == first_thread)
				set_cpus_related(cpu, i, cpu_coregroup_mask);
			else if (coregroup_id == cpu_to_coregroup_id(i))
				set_cpus_related(cpu, i, cpu_coregroup_mask);
		}
	}

	if (pkg_id == -1) {
		struct cpumask *(*mask)(int) = cpu_sibling_mask;

		/*
		 * Copy the sibling mask into core sibling mask and
		 * mark any CPUs on the same chip as this CPU.
		 */
		if (shared_caches)
			mask = cpu_l2_cache_mask;

		for_each_cpu(i, mask(cpu))
			set_cpus_related(cpu, i, cpu_core_mask);

		return;
	}

	for_each_cpu(i, cpu_online_mask)
		if (get_physical_package_id(i) == pkg_id)
			set_cpus_related(cpu, i, cpu_core_mask);
}

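/*
 * Ordering note (for illustration): add_cpu_to_masks() fills the masks
 * from the innermost level outwards (thread siblings, small-core/L1,
 * L2, coregroup, then the chip-wide core mask), so the pkg_id == -1
 * fallback can seed cpu_core_mask from the narrower sibling or L2
 * masks that were already populated.
 */
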
/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update topology CPU masks */
	add_cpu_to_masks(cpu);

	/*
	 * Check for any shared caches. Note that this must be done on a
	 * per-core basis because one core in the pair might be disabled.
	 */
	if (!shared_caches) {
		struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
		struct cpumask *mask = cpu_l2_cache_mask(cpu);

		if (has_big_cores)
			sibling_mask = cpu_smallcore_mask;

		if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
			shared_caches = true;
	}

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	boot_init_stack_canary();

	local_irq_enable();

	/* We can enable ftrace for secondary cpus now */
	this_cpu_enable_ftrace();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void fixup_topology(void)
{
#ifdef CONFIG_SCHED_SMT
	if (has_big_cores) {
		pr_info("Big cores detected but using small core scheduling\n");
		powerpc_topology[smt_idx].mask = smallcore_smt_mask;
	}
#endif

	if (!has_coregroup_support())
		powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
}

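/*
 * Illustrative effect: without coregroup support the MC level becomes a
 * copy of the CACHE level; the scheduler's topology setup then detects
 * the duplicate level and degenerates it away, leaving SMT -> CACHE ->
 * DIE on such systems.
 */
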
void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We are running pinned to the boot CPU, see rest_init().
	 */
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	fixup_topology();
	set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	this_cpu_disable_ftrace();

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	remove_cpu_from_masks(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void arch_cpu_idle_dead(void)
{
	sched_preempt_enable_no_resched();

	/*
	 * Disable on the down path. This will be re-enabled by
	 * start_secondary() via start_secondary_resume() below.
	 */
	this_cpu_disable_ftrace();

	if (smp_ops->cpu_offline_self)
		smp_ops->cpu_offline_self();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif