// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>
#include <linux/random.h>
#include <linux/stackprotector.h>
#include <linux/pgtable.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>
#include <asm/kup.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct task_struct *secondary_current;
bool has_big_cores;
bool coregroup_enabled;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);

enum {
#ifdef CONFIG_SCHED_SMT
	smt_idx,
#endif
	cache_idx,
	mc_idx,
	die_idx,
};

#define MAX_THREAD_LIST_SIZE	8
#define THREAD_GROUP_SHARE_L1	1
struct thread_groups {
	unsigned int property;
	unsigned int nr_groups;
	unsigned int threads_per_group;
	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
};

/*
 * On big-core systems, cpu_l1_cache_map for each CPU corresponds to
 * the set of its siblings that share the L1 cache.
 */
DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
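
/*
 * Note (added commentary): on PPC64, smt_enabled_at_boot is derived from
 * the "smt-enabled=" kernel command line option. For example, booting
 * with smt-enabled=2 on a machine with 8 threads per core makes the
 * check above reject threads 2-7 of each core, so only threads 0 and 1
 * are brought up during boot.
 */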

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca_ptrs[nr]->cpu_start) {
		paca_ptrs[nr]->cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * OK, it's not there, so it might be soft-unplugged; let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	timer_broadcast_interrupt();
	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = "nmi ipi",
#endif
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}
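
/*
 * Usage sketch (illustrative, not from the original source): an
 * interrupt controller driver with four or more hardware IPIs would
 * typically map its IPI virqs one-to-one onto the message types:
 *
 *	smp_request_message_ipi(virq0, PPC_MSG_CALL_FUNCTION);
 *	smp_request_message_ipi(virq1, PPC_MSG_RESCHEDULE);
 *
 * Controllers with fewer IPIs instead multiplex all message types over
 * a single interrupt using the CONFIG_PPC_SMP_MUXED_IPI code below.
 */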

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
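
/*
 * Worked example (added for clarity): IPI_MESSAGE(A) selects the byte
 * lane of info->messages that smp_muxed_ipi_set_message() writes with
 * message[A] = 1. On a 64-bit big-endian kernel byte 0 is the most
 * significant byte, so IPI_MESSAGE(0) == 1UL << 56; on little-endian
 * byte 0 is the least significant byte, so IPI_MESSAGE(0) == 1UL << 0.
 * Either way, (all & IPI_MESSAGE(msg)) in the demux loop below is
 * non-zero exactly when message[msg] was set by a sender.
 */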

irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			timer_broadcast_interrupt();
#endif
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 * have returned from their handlers, so there is no guarantee about
 * concurrency or re-entrancy.
 *
 * A new NMI can be issued before all targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static void nmi_ipi_lock(void)
{
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
}

static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
	atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}
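
/*
 * Note on the locking scheme (added commentary): nmi_ipi_lock_start()
 * restores interrupts while it spins, presumably so that an incoming
 * IPI can still be taken on this CPU while another CPU holds
 * __nmi_ipi_lock; the plain nmi_ipi_lock() variant is used where
 * interrupts are already known to be hard disabled.
 */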

/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *) = NULL;
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
		fn = READ_ONCE(nmi_ipi_function);
		WARN_ON_ONCE(!fn);
		ret = 1;
	}
	nmi_ipi_unlock_end(&flags);

	if (fn)
		fn(regs);

	return ret;
}

static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   begin executing the handler, == 0 specifies indefinite delay.
 */
static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
			      u64 delay_us, bool safe)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy) {
		nmi_ipi_unlock_end(&flags);
		spin_until_cond(!nmi_ipi_busy);
		nmi_ipi_lock_start(&flags);
	}
	nmi_ipi_busy = true;
	nmi_ipi_function = fn;

	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}

	nmi_ipi_unlock();

	/* Interrupts remain hard disabled */

	do_smp_send_nmi_ipi(cpu, safe);

	nmi_ipi_lock();
	/* nmi_ipi_busy is set here, so unlock/lock is okay */
	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		nmi_ipi_unlock();
		udelay(1);
		nmi_ipi_lock();
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}

	nmi_ipi_function = NULL;
	nmi_ipi_busy = false;

	nmi_ipi_unlock_end(&flags);

	return ret;
}

int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
}

int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
}
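
/*
 * Usage sketch (illustrative; my_nmi_handler is a hypothetical
 * callback): ask all other online CPUs to run a handler within one
 * second, as the debugger and crash paths below do:
 *
 *	if (!smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, my_nmi_handler, 1000000))
 *		pr_warn("some CPUs did not enter the NMI handler\n");
 *
 * The delay is in microseconds because the wait loop above polls in
 * udelay(1) steps; passing 0 waits indefinitely.
 */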
#endif /* CONFIG_NMI_IPI */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	int cpu;

	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
	if (kdump_in_progress() && crash_wake_offline) {
		for_each_present_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			/*
			 * crash_ipi_callback will wait for
			 * all cpus, including offline CPUs.
			 * We don't care about nmi_ipi_function.
			 * Offline cpus will jump straight into
			 * crash_ipi_callback, we can skip the
			 * entire NMI dance and waiting for
			 * cpus to clear pending mask, etc.
			 */
			do_smp_send_nmi_ipi(cpu, false);
		}
	}
}
#endif

#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
	/*
	 * IRQs are already hard disabled by smp_handle_nmi_ipi.
	 */
	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}

#else /* CONFIG_NMI_IPI */

static void stop_this_cpu(void *dummy)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	static bool stopped = false;

	/*
	 * Prevent waiting on csd lock from a previous smp_send_stop.
	 * This is racy, but in general callers try to do the right
	 * thing and only fire off one smp_send_stop (e.g., see
	 * kernel/panic.c)
	 */
	if (stopped)
		return;

	stopped = true;

	smp_call_function(stop_this_cpu, NULL, 0);
}
#endif /* CONFIG_NMI_IPI */

struct task_struct *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

/*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks so
 * rather than just passing around the cpumask we pass around a function that
 * returns that cpumask for the given CPU.
 */
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
{
	cpumask_set_cpu(i, get_cpumask(j));
	cpumask_set_cpu(j, get_cpumask(i));
}

#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
			       struct cpumask *(*get_cpumask)(int))
{
	cpumask_clear_cpu(i, get_cpumask(j));
	cpumask_clear_cpu(j, get_cpumask(i));
}
#endif
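
/*
 * Example (added for clarity): set_cpus_related(4, 5, cpu_sibling_mask)
 * sets CPU 4 in CPU 5's sibling mask and CPU 5 in CPU 4's sibling mask,
 * keeping the per-cpu masks symmetric; set_cpus_unrelated() undoes both
 * directions on hot-unplug.
 */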

/*
 * Extends set_cpus_related. Instead of setting one CPU at a time in
 * dstmask, ORs the whole srcmask in one shot. dstmask should be a
 * superset of srcmask.
 */
static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
				struct cpumask *(*dstmask)(int))
{
	struct cpumask *mask;
	int k;

	mask = srcmask(j);
	for_each_cpu(k, srcmask(i))
		cpumask_or(dstmask(k), dstmask(k), mask);

	if (i == j)
		return;

	mask = srcmask(i);
	for_each_cpu(k, srcmask(j))
		cpumask_or(dstmask(k), dstmask(k), mask);
}
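
/*
 * Example (added for clarity): or_cpumasks_related(cpu, i,
 * cpu_sibling_mask, cpu_l2_cache_mask) ORs CPU i's entire sibling mask
 * into the l2-cache mask of every sibling of @cpu, and vice versa,
 * rather than calling set_cpus_related() once per CPU pair.
 */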

/*
 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 *                      property for the CPU device node @dn and stores
 *                      the parsed output in the thread_groups
 *                      structure @tg if the ibm,thread-groups[0]
 *                      matches @property.
 *
 * @dn: The device node of the CPU device.
 * @tg: Pointer to a thread group structure into which the parsed
 *      output of "ibm,thread-groups" is stored.
 * @property: The property of the thread-group that the caller is
 *            interested in.
 *
 * ibm,thread-groups[0..N-1] array defines which group of threads in
 * the CPU-device node can be grouped together based on the property.
 *
 * ibm,thread-groups[0] tells us the property based on which the
 * threads are being grouped together. If this value is 1, it implies
 * that the threads in the same group share the L1 and translation
 * caches.
 *
 * ibm,thread-groups[1] tells us how many such thread groups exist.
 *
 * ibm,thread-groups[2] tells us the number of threads in each such
 * group.
 *
 * ibm,thread-groups[3..N-1] is the list of threads identified by
 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 * the grouping.
 *
 * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
 * implies that there are 2 groups of 4 threads each, where each group
 * of threads shares the L1 and translation caches.
 *
 * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
 * and the "ibm,ppc-interrupt-server#s" of the second group is
 * {9,10,11,12}.
 *
 * Returns 0 on success, -EINVAL if the property does not exist,
 * -ENODATA if property does not have a value, and -EOVERFLOW if the
 * property data isn't large enough.
 */
static int parse_thread_groups(struct device_node *dn,
			       struct thread_groups *tg,
			       unsigned int property)
{
	int i;
	u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
	u32 *thread_list;
	size_t total_threads;
	int ret;

	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
					 thread_group_array, 3);
	if (ret)
		return ret;

	tg->property = thread_group_array[0];
	tg->nr_groups = thread_group_array[1];
	tg->threads_per_group = thread_group_array[2];
	if (tg->property != property ||
	    tg->nr_groups < 1 ||
	    tg->threads_per_group < 1)
		return -ENODATA;

	total_threads = tg->nr_groups * tg->threads_per_group;

	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
					 thread_group_array,
					 3 + total_threads);
	if (ret)
		return ret;

	thread_list = &thread_group_array[3];

	for (i = 0; i < total_threads; i++)
		tg->thread_list[i] = thread_list[i];

	return 0;
}

/*
 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 *                              that @cpu belongs to.
 *
 * @cpu : The logical CPU whose thread group is being searched.
 * @tg : The thread-group structure of the CPU node which @cpu belongs
 *       to.
 *
 * Returns the index to tg->thread_list that points to the start
 * of the thread_group that @cpu belongs to.
 *
 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 * tg->thread_list.
 */
static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
{
	int hw_cpu_id = get_hard_smp_processor_id(cpu);
	int i, j;

	for (i = 0; i < tg->nr_groups; i++) {
		int group_start = i * tg->threads_per_group;

		for (j = 0; j < tg->threads_per_group; j++) {
			int idx = group_start + j;

			if (tg->thread_list[idx] == hw_cpu_id)
				return group_start;
		}
	}

	return -1;
}

static int init_cpu_l1_cache_map(int cpu)
{
	struct device_node *dn = of_get_cpu_node(cpu, NULL);
	struct thread_groups tg = {.property = 0,
				   .nr_groups = 0,
				   .threads_per_group = 0};
	int first_thread = cpu_first_thread_sibling(cpu);
	int i, cpu_group_start = -1, err = 0;

	if (!dn)
		return -ENODATA;

	err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
	if (err)
		goto out;

	cpu_group_start = get_cpu_thread_group_start(cpu, &tg);

	if (unlikely(cpu_group_start == -1)) {
		WARN_ON_ONCE(1);
		err = -ENODATA;
		goto out;
	}

	zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
				GFP_KERNEL, cpu_to_node(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++) {
		int i_group_start = get_cpu_thread_group_start(i, &tg);

		if (unlikely(i_group_start == -1)) {
			WARN_ON_ONCE(1);
			err = -ENODATA;
			goto out;
		}

		if (i_group_start == cpu_group_start)
			cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
	}

out:
	of_node_put(dn);
	return err;
}
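
/*
 * Illustrative example (an assumption based on the POWER9 big-core
 * layout, not taken from this file): with 8 threads per core split
 * into two thread groups of 4, cpu_l1_cache_map for CPU 0 would hold
 * something like {0,2,4,6} while CPU 1's map holds {1,3,5,7},
 * depending on how the device tree groups the threads.
 */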

static bool shared_caches;

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

/*
 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
 * since the migrated task remains cache hot. We want to take advantage of this
 * at the scheduler level so an extra topology level is required.
 */
static int powerpc_shared_cache_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}

/*
 * We can't just pass cpu_l2_cache_mask() directly because it returns
 * a non-const pointer and the compiler barfs on that.
 */
static const struct cpumask *shared_cache_mask(int cpu)
{
	return per_cpu(cpu_l2_cache_map, cpu);
}

#ifdef CONFIG_SCHED_SMT
static const struct cpumask *smallcore_smt_mask(int cpu)
{
	return cpu_smallcore_mask(cpu);
}
#endif

static struct cpumask *cpu_coregroup_mask(int cpu)
{
	return per_cpu(cpu_coregroup_map, cpu);
}

static bool has_coregroup_support(void)
{
	return coregroup_enabled;
}

static const struct cpumask *cpu_mc_mask(int cpu)
{
	return cpu_coregroup_mask(cpu);
}

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
	{ cpu_mc_mask, SD_INIT_NAME(MC) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
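
/*
 * Added commentary: the levels above are ordered smallest to largest
 * (SMT threads, CPUs sharing a cache, the coregroup-backed MC level,
 * then the whole die). fixup_topology() below patches the masks (e.g.
 * for small-core SMT) and consolidates adjacent levels whose masks
 * turn out identical before the array is handed to
 * set_sched_topology() in smp_cpus_done().
 */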

static int init_big_cores(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int err = init_cpu_l1_cache_map(cpu);

		if (err)
			return err;

		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
					GFP_KERNEL,
					cpu_to_node(cpu));
	}

	has_big_cores = true;
	return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up yet, but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		if (has_coregroup_support())
			zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
						GFP_KERNEL, cpu_to_node(cpu));

#ifdef CONFIG_NEED_MULTIPLE_NODES
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
#endif
		/*
		 * cpu_core_map exists mainly because it has been exported
		 * for a long time; it only carries a snapshot of
		 * cpu_cpu_mask.
		 */
		cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
	}

	/* Init the cpumasks so the boot CPU is related to itself */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));

	if (has_coregroup_support())
		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));

	init_big_cores();
	if (has_big_cores) {
		cpumask_set_cpu(boot_cpuid,
				cpu_smallcore_mask(boot_cpuid));
	}

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca_ptrs[boot_cpuid]->__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = current;
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt to
	 * be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
#ifdef CONFIG_PPC64
	paca_ptrs[cpu]->__current = idle;
	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
				 THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	idle->cpu = cpu;
	secondary_current = current_set[cpu] = idle;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure callin-map entry is 0 (can be left over from a
	 * previous CPU hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
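
/*
 * Worked example (added for clarity): with threads_shift == 3, i.e.
 * 8 threads per core, cpu_core_index_of_thread(13) == 1 and
 * cpu_first_thread_of_core(1) == 8, so logical CPU 13 is thread 5 of
 * core 1.
 */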

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static bool update_mask_by_l2(int cpu)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	struct device_node *l2_cache, *np;
	cpumask_var_t mask;
	int i;

	l2_cache = cpu_to_l2cache(cpu);
	if (!l2_cache) {
		struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;

		/*
		 * If there is no l2cache for this CPU, assume that all
		 * its siblings share the cache with it.
		 */
		if (has_big_cores)
			sibling_mask = cpu_smallcore_mask;

		for_each_cpu(i, sibling_mask(cpu))
			set_cpus_related(cpu, i, cpu_l2_cache_mask);

		return false;
	}

	alloc_cpumask_var_node(&mask, GFP_KERNEL, cpu_to_node(cpu));
	cpumask_and(mask, cpu_online_mask, cpu_cpu_mask(cpu));

	if (has_big_cores)
		submask_fn = cpu_smallcore_mask;

	/* Update l2-cache mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);

	/* Skip all CPUs already part of current CPU l2-cache mask */
	cpumask_andnot(mask, mask, cpu_l2_cache_mask(cpu));

	for_each_cpu(i, mask) {
		/*
		 * While updating the masks, the current CPU has not yet
		 * been marked online, but we still need to update its
		 * cache masks.
		 */
		np = cpu_to_l2cache(i);

		/* Skip all CPUs already part of current CPU l2-cache */
		if (np == l2_cache) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
			cpumask_andnot(mask, mask, submask_fn(i));
		} else {
			cpumask_andnot(mask, mask, cpu_l2_cache_mask(i));
		}

		of_node_put(np);
	}
	of_node_put(l2_cache);
	free_cpumask_var(mask);

	return true;
}
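
/*
 * Added commentary: the mask/andnot dance above is an optimization.
 * Each CPU whose l2-cache node matches has its whole submask folded in
 * at once via or_cpumasks_related(), and every CPU already covered is
 * removed from the candidate mask, so each l2 domain is visited once
 * rather than once per CPU.
 */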

#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
{
	struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
	int i;

	if (shared_caches)
		mask_fn = cpu_l2_cache_mask;

	for_each_cpu(i, mask_fn(cpu)) {
		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
		if (has_big_cores)
			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
	}

	if (has_coregroup_support()) {
		for_each_cpu(i, cpu_coregroup_mask(cpu))
			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
	}
}
#endif

static inline void add_cpu_to_smallcore_masks(int cpu)
{
	int i;

	if (!has_big_cores)
		return;

	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));

	for_each_cpu(i, per_cpu(cpu_l1_cache_map, cpu)) {
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_smallcore_mask);
	}
}

static void update_coregroup_mask(int cpu)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	cpumask_var_t mask;
	int coregroup_id = cpu_to_coregroup_id(cpu);
	int i;

	alloc_cpumask_var_node(&mask, GFP_KERNEL, cpu_to_node(cpu));
	cpumask_and(mask, cpu_online_mask, cpu_cpu_mask(cpu));

	if (shared_caches)
		submask_fn = cpu_l2_cache_mask;

	/* Update coregroup mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);

	/* Skip all CPUs already part of coregroup mask */
	cpumask_andnot(mask, mask, cpu_coregroup_mask(cpu));

	for_each_cpu(i, mask) {
		/* Skip all CPUs not part of this coregroup */
		if (coregroup_id == cpu_to_coregroup_id(i)) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
			cpumask_andnot(mask, mask, submask_fn(i));
		} else {
			cpumask_andnot(mask, mask, cpu_coregroup_mask(i));
		}
	}
	free_cpumask_var(mask);
}

static void add_cpu_to_masks(int cpu)
{
	int first_thread = cpu_first_thread_sibling(cpu);
	int i;

	/*
	 * This CPU will not be in the online mask yet so we need to manually
	 * add it to its own thread sibling mask.
	 */
	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++)
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_sibling_mask);

	add_cpu_to_smallcore_masks(cpu);
	update_mask_by_l2(cpu);

	if (has_coregroup_support())
		update_coregroup_mask(cpu);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update topology CPU masks */
	add_cpu_to_masks(cpu);

	/*
	 * Check for any shared caches. Note that this must be done on a
	 * per-core basis because one core in the pair might be disabled.
	 */
	if (!shared_caches) {
		struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
		struct cpumask *mask = cpu_l2_cache_mask(cpu);

		if (has_big_cores)
			sibling_mask = cpu_smallcore_mask;

		if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
			shared_caches = true;
	}

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	boot_init_stack_canary();

	local_irq_enable();

	/* We can enable ftrace for secondary cpus now */
	this_cpu_enable_ftrace();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void fixup_topology(void)
{
	int i;

#ifdef CONFIG_SCHED_SMT
	if (has_big_cores) {
		pr_info("Big cores detected but using small core scheduling\n");
		powerpc_topology[smt_idx].mask = smallcore_smt_mask;
	}
#endif

	if (!has_coregroup_support())
		powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;

	/*
	 * Try to consolidate topology levels here instead of
	 * allowing the scheduler to degenerate them.
	 * - Don't consolidate if masks are different.
	 * - Don't consolidate if sd_flags exist and are different.
	 */
	for (i = 1; i <= die_idx; i++) {
		if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
			continue;

		if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
		    powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
			continue;

		if (!powerpc_topology[i - 1].sd_flags)
			powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;

		powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
		powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
#ifdef CONFIG_SCHED_DEBUG
		powerpc_topology[i].name = powerpc_topology[i + 1].name;
#endif
	}
}
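
/*
 * Worked example (added for clarity): on a system without coregroup
 * support, the MC mask was aliased to the CACHE mask above, so the
 * loop sees identical masks at cache_idx and mc_idx and shifts the DIE
 * entry down over MC, leaving an SMT/CACHE/DIE hierarchy with no
 * degenerate level for the scheduler to strip.
 */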

void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We are running pinned to the boot CPU, see rest_init().
	 */
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	fixup_topology();
	set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	this_cpu_disable_ftrace();

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	remove_cpu_from_masks(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void arch_cpu_idle_dead(void)
{
	sched_preempt_enable_no_resched();

	/*
	 * Disable ftrace on the down path. This will be re-enabled by
	 * start_secondary() via start_secondary_resume() below.
	 */
	this_cpu_disable_ftrace();

	if (smp_ops->cpu_offline_self)
		smp_ops->cpu_offline_self();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif