```c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>
#include <linux/random.h>
#include <linux/stackprotector.h>
#include <linux/pgtable.h>
#include <linux/clockchips.h>
#include <linux/kexec.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>
#include <asm/kup.h>
#include <asm/fadump.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct task_struct *secondary_current;
bool has_big_cores;
bool coregroup_enabled;
bool thread_group_shares_l2;
bool thread_group_shares_l3;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);

enum {
#ifdef CONFIG_SCHED_SMT
        smt_idx,
#endif
        cache_idx,
        mc_idx,
        die_idx,
};

#define MAX_THREAD_LIST_SIZE    8
#define THREAD_GROUP_SHARE_L1   1
#define THREAD_GROUP_SHARE_L2_L3 2
struct thread_groups {
        unsigned int property;
        unsigned int nr_groups;
        unsigned int threads_per_group;
        unsigned int thread_list[MAX_THREAD_LIST_SIZE];
};

/* Maximum number of properties that groups of threads within a core can share */
#define MAX_THREAD_GROUP_PROPERTIES 2

struct thread_groups_list {
        unsigned int nr_properties;
        struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES];
};

static struct thread_groups_list tgl[NR_CPUS] __initdata;
/*
 * On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
 * the set of its siblings that share the L1 cache.
 */
DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);

/*
 * On some big-core systems, thread_group_l2_cache_map for each CPU
 * corresponds to the set of its siblings within the core that share the
 * L2 cache.
 */
DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);

/*
 * On P10, thread_group_l3_cache_map for each CPU is equal to the
 * thread_group_l2_cache_map.
 */
DEFINE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
        /* Special case - we inhibit secondary thread startup
         * during boot if the user requests it.
         */
        if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
                if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
                        return 0;
                if (smt_enabled_at_boot
                    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
                        return 0;
        }

        return 1;
}
```
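The boot-time check above is easiest to see with concrete numbers. Below is a small standalone sketch (illustrative only, not kernel code): it models `cpu_thread_in_core()` as `nr % 8`, i.e. an assumed SMT8 core, and shows that with `smt_enabled_at_boot == 2` only the first two threads of each core are considered bootable.

```c
/* bootable_demo.c - illustrative sketch; assumes an SMT8 core */
#include <stdio.h>

static int smt_enabled_at_boot = 2;     /* e.g. SMT limited to 2 at boot */

static int cpu_thread_in_core(int nr)
{
        return nr % 8;                  /* stand-in for the real helper */
}

static int bootable(int nr)
{
        /* same two conditions as smp_generic_cpu_bootable() above */
        if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
                return 0;
        if (smt_enabled_at_boot && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
                return 0;
        return 1;
}

int main(void)
{
        for (int nr = 0; nr < 8; nr++)
                printf("cpu %d bootable: %d\n", nr, bootable(nr));
        /* threads 0 and 1 boot; threads 2..7 of the core are held back */
        return 0;
}
```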
```c
#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
        if (nr < 0 || nr >= nr_cpu_ids)
                return -EINVAL;

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero. After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        if (!paca_ptrs[nr]->cpu_start) {
                paca_ptrs[nr]->cpu_start = 1;
                smp_mb();
                return 0;
        }

#ifdef CONFIG_HOTPLUG_CPU
        /*
         * Ok, it's not there, so it might be soft-unplugged; let's
         * try to bring it back.
         */
        generic_set_cpu_up(nr);
        smp_wmb();
        smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

        return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
        generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
        scheduler_ipi();
        return IRQ_HANDLED;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
        timer_broadcast_interrupt();
        return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
        smp_handle_nmi_ipi(get_irq_regs());
        return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
        [PPC_MSG_CALL_FUNCTION] = call_function_action,
        [PPC_MSG_RESCHEDULE] = reschedule_action,
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#endif
#ifdef CONFIG_NMI_IPI
        [PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
        [PPC_MSG_CALL_FUNCTION] = "ipi call function",
        [PPC_MSG_RESCHEDULE] = "ipi reschedule",
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
#endif
#ifdef CONFIG_NMI_IPI
        [PPC_MSG_NMI_IPI] = "nmi ipi",
#endif
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
        int err;

        if (msg < 0 || msg > PPC_MSG_NMI_IPI)
                return -EINVAL;
#ifndef CONFIG_NMI_IPI
        if (msg == PPC_MSG_NMI_IPI)
                return 1;
#endif

        err = request_irq(virq, smp_ipi_action[msg],
                          IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
                          smp_ipi_name[msg], NULL);
        WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
             virq, smp_ipi_name[msg], err);

        return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
        long messages;                  /* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);
        char *message = (char *)&info->messages;

        /*
         * Order previous accesses before accesses in the IPI handler.
         */
        smp_mb();
        message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
        smp_muxed_ipi_set_message(cpu, msg);

        /*
         * cause_ipi functions are required to include a full barrier
         * before doing whatever causes the IPI.
         */
        smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
```
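The `IPI_MESSAGE()` macros encode the byte/bit duality of the muxed-IPI word: `smp_muxed_ipi_set_message()` writes a whole byte (`message[msg] = 1`), while the demux side tests a single bit, namely the least-significant bit of that byte, whose position within the `long` depends on endianness. A minimal userspace sketch of the little-endian case (illustrative only; the file name and `msg = 2` slot are arbitrary here):

```c
/* ipi_byte_demo.c - standalone illustration, not kernel code */
#include <assert.h>
#include <stdio.h>

/* little-endian variant of IPI_MESSAGE(), as above */
#define IPI_MESSAGE(A) (1uL << (8 * (A)))

int main(void)
{
        long messages = 0;
        char *message = (char *)&messages;
        int msg = 2;                            /* an arbitrary message slot */

        message[msg] = 1;                       /* the set_message() side */
        assert(messages & IPI_MESSAGE(msg));    /* the demux side */
        printf("messages = %#lx\n", (unsigned long)messages);  /* 0x10000 */
        return 0;
}
```

On a big-endian long, byte 0 is the most significant byte, which is why that variant anchors the shift at `BITS_PER_LONG - 8` and walks downward.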
```c
irqreturn_t smp_ipi_demux(void)
{
        mb();   /* order any irq clear */

        return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
        struct cpu_messages *info;
        unsigned long all;

        info = this_cpu_ptr(&ipi_message);
        do {
                all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
                /*
                 * Must check for PPC_MSG_RM_HOST_ACTION messages
                 * before PPC_MSG_CALL_FUNCTION messages because when
                 * a VM is destroyed, we call kick_all_cpus_sync()
                 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
                 * messages have completed before we free any VCPUs.
                 */
                if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
                        kvmppc_xics_ipi_action();
#endif
                if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
                        generic_smp_call_function_interrupt();
                if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
                        scheduler_ipi();
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
                if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
                        timer_broadcast_interrupt();
#endif
#ifdef CONFIG_NMI_IPI
                if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
                        nmi_ipi_action(0, NULL);
#endif
        } while (info->messages);

        return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
```
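The drain loop in `smp_ipi_demux_relaxed()` is a grab-and-clear pattern: `xchg()` atomically snapshots all pending message bytes while zeroing the word, the handlers run from the snapshot, and the loop re-checks for messages that arrived in the meantime. A userspace analogue using C11 atomics (a sketch, not the kernel implementation; slots 0 and 1 simply stand in for two message types):

```c
/* demux_demo.c - C11-atomics analogue of the xchg() drain loop */
#include <stdatomic.h>
#include <stdio.h>

#define IPI_MESSAGE(A) (1uL << (8 * (A)))       /* little-endian variant */

static _Atomic unsigned long messages;

static void drain(void)
{
        unsigned long all;

        do {
                /* atomically grab every pending message and clear the word */
                all = atomic_exchange(&messages, 0);
                if (all & IPI_MESSAGE(0))
                        puts("handling message 0");
                if (all & IPI_MESSAGE(1))
                        puts("handling message 1");
        } while (atomic_load(&messages));       /* catch late arrivals */
}

int main(void)
{
        atomic_fetch_or(&messages, IPI_MESSAGE(0) | IPI_MESSAGE(1));
        drain();
        return 0;
}
```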
```c
static inline void do_message_pass(int cpu, int msg)
{
        if (smp_ops->message_pass)
                smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
        else
                smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
        if (likely(smp_ops))
                do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
        do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 * have returned from their handlers, so there is no guarantee about
 * concurrency or re-entrancy.
 *
 * A new NMI can be issued before all targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

noinstr static void nmi_ipi_lock_start(unsigned long *flags)
{
        raw_local_irq_save(*flags);
        hard_irq_disable();
        while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
                raw_local_irq_restore(*flags);
                spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
                raw_local_irq_save(*flags);
                hard_irq_disable();
        }
}

noinstr static void nmi_ipi_lock(void)
{
        while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
                spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
}

noinstr static void nmi_ipi_unlock(void)
{
        smp_mb();
        WARN_ON(arch_atomic_read(&__nmi_ipi_lock) != 1);
        arch_atomic_set(&__nmi_ipi_lock, 0);
}

noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
{
        nmi_ipi_unlock();
        raw_local_irq_restore(*flags);
}
```
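`__nmi_ipi_lock` is a hand-rolled spinlock built from an atomic compare-and-swap, used here instead of a normal spinlock because it must be usable from real NMI context. A userspace analogue of the acquire/release protocol, using C11 atomics in place of `arch_atomic_cmpxchg()` and `spin_until_cond()` (a sketch only; the kernel versions above additionally manage interrupt state and memory barriers):

```c
/* nmi_lock_demo.c - C11-atomics analogue of the cmpxchg-based lock */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int nmi_lock;

static void lock(void)
{
        int expected = 0;

        /* try to flip 0 -> 1; on failure, wait for it to read 0 again */
        while (!atomic_compare_exchange_strong(&nmi_lock, &expected, 1)) {
                while (atomic_load(&nmi_lock) != 0)
                        ;                       /* spin_until_cond() analogue */
                expected = 0;
        }
}

static void unlock(void)
{
        atomic_store(&nmi_lock, 0);             /* kernel does smp_mb() first */
}

int main(void)
{
        lock();
        puts("critical section");
        unlock();
        return 0;
}
```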
```c
/*
 * Platform NMI handler calls this to ack
 */
noinstr int smp_handle_nmi_ipi(struct pt_regs *regs)
{
        void (*fn)(struct pt_regs *) = NULL;
        unsigned long flags;
        int me = raw_smp_processor_id();
        int ret = 0;

        /*
         * Unexpected NMIs are possible here because the interrupt may not
         * be able to distinguish NMI IPIs from other types of NMIs, or
         * because the caller may have timed out.
         */
        nmi_ipi_lock_start(&flags);
        if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
                cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
                fn = READ_ONCE(nmi_ipi_function);
                WARN_ON_ONCE(!fn);
                ret = 1;
        }
        nmi_ipi_unlock_end(&flags);

        if (fn)
                fn(regs);

        return ret;
}

static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
        if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
                return;

        if (cpu >= 0) {
                do_message_pass(cpu, PPC_MSG_NMI_IPI);
        } else {
                int c;

                for_each_online_cpu(c) {
                        if (c == raw_smp_processor_id())
                                continue;
                        do_message_pass(c, PPC_MSG_NMI_IPI);
                }
        }
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   begin executing the handler, == 0 specifies indefinite delay.
 */
static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
                              u64 delay_us, bool safe)
{
        unsigned long flags;
        int me = raw_smp_processor_id();
        int ret = 1;

        BUG_ON(cpu == me);
        BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

        if (unlikely(!smp_ops))
                return 0;

        nmi_ipi_lock_start(&flags);
        while (nmi_ipi_busy) {
                nmi_ipi_unlock_end(&flags);
                spin_until_cond(!nmi_ipi_busy);
                nmi_ipi_lock_start(&flags);
        }
        nmi_ipi_busy = true;
        nmi_ipi_function = fn;

        WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));

        if (cpu < 0) {
                /* ALL_OTHERS */
                cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
                cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
        } else {
                cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
        }

        nmi_ipi_unlock();

        /* Interrupts remain hard disabled */

        do_smp_send_nmi_ipi(cpu, safe);

        nmi_ipi_lock();
        /* nmi_ipi_busy is set here, so unlock/lock is okay */
        while (!cpumask_empty(&nmi_ipi_pending_mask)) {
                nmi_ipi_unlock();
                udelay(1);
                nmi_ipi_lock();
                if (delay_us) {
                        delay_us--;
                        if (!delay_us)
                                break;
                }
        }

        if (!cpumask_empty(&nmi_ipi_pending_mask)) {
                /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
                ret = 0;
                cpumask_clear(&nmi_ipi_pending_mask);
        }

        nmi_ipi_function = NULL;
        nmi_ipi_busy = false;

        nmi_ipi_unlock_end(&flags);

        return ret;
}

int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
        return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
}

int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
        return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
}
#endif /* CONFIG_NMI_IPI */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
static void debugger_ipi_callback(struct pt_regs *regs)
{
        debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
        smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
        int cpu;

        smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
        if (kdump_in_progress() && crash_wake_offline) {
                for_each_present_cpu(cpu) {
                        if (cpu_online(cpu))
                                continue;
                        /*
                         * crash_ipi_callback will wait for
                         * all cpus, including offline CPUs.
                         * We don't care about nmi_ipi_function.
                         * Offline cpus will jump straight into
                         * crash_ipi_callback, so we can skip the
                         * entire NMI dance and waiting for
                         * cpus to clear pending mask, etc.
                         */
                        do_smp_send_nmi_ipi(cpu, false);
                }
        }
}
#endif

void crash_smp_send_stop(void)
{
        static bool stopped = false;

        /*
         * In case of fadump, register data for all CPUs is captured by f/w
         * on the ibm,os-term rtas call. Skip IPI callbacks to other CPUs
         * before this rtas call to avoid tricky post processing of those
         * CPUs' backtraces.
         */
        if (should_fadump_crash())
                return;

        if (stopped)
                return;

        stopped = true;

#ifdef CONFIG_KEXEC_CORE
        if (kexec_crash_image) {
                crash_kexec_prepare();
                return;
        }
#endif

        smp_send_stop();
}

#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
        /*
         * IRQs are already hard disabled by smp_handle_nmi_ipi().
         */
        set_cpu_online(smp_processor_id(), false);

        spin_begin();
        while (1)
                spin_cpu_relax();
}

void smp_send_stop(void)
{
        smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}

#else /* CONFIG_NMI_IPI */

static void stop_this_cpu(void *dummy)
{
        hard_irq_disable();

        /*
         * Offlining CPUs in stop_this_cpu can result in scheduler warnings
         * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
         * to know other CPUs are offline before it breaks locks to flush
         * printk buffers, in case we panic()ed while holding the lock.
         */
        set_cpu_online(smp_processor_id(), false);

        spin_begin();
        while (1)
                spin_cpu_relax();
}

void smp_send_stop(void)
{
        static bool stopped = false;

        /*
         * Prevent waiting on csd lock from a previous smp_send_stop.
         * This is racy, but in general callers try to do the right
         * thing and only fire off one smp_send_stop (e.g., see
         * kernel/panic.c)
         */
        if (stopped)
                return;

        stopped = true;

        smp_call_function(stop_this_cpu, NULL, 0);
}
#endif /* CONFIG_NMI_IPI */

static struct task_struct *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
        per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_E500
        per_cpu(next_tlbcam_idx, id)
                = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

/*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks,
 * so rather than just passing around the cpumask we pass around a function
 * that returns that cpumask for the given CPU.
 */
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
{
        cpumask_set_cpu(i, get_cpumask(j));
        cpumask_set_cpu(j, get_cpumask(i));
}

#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
                               struct cpumask *(*get_cpumask)(int))
{
        cpumask_clear_cpu(i, get_cpumask(j));
        cpumask_clear_cpu(j, get_cpumask(i));
}
#endif

/*
 * Extends set_cpus_related. Instead of setting one CPU at a time in
 * dstmask, ORs in the whole srcmask in one shot. dstmask should be a
 * superset of srcmask.
 */
static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
                                struct cpumask *(*dstmask)(int))
{
        struct cpumask *mask;
        int k;

        mask = srcmask(j);
        for_each_cpu(k, srcmask(i))
                cpumask_or(dstmask(k), dstmask(k), mask);

        if (i == j)
                return;

        mask = srcmask(i);
        for_each_cpu(k, srcmask(j))
                cpumask_or(dstmask(k), dstmask(k), mask);
}
```
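What `or_cpumasks_related()` builds is a symmetric relation: every CPU in `srcmask(i)` gets `srcmask(j)` ORed into its `dstmask`, and vice versa. A plain-bitmask userspace analogue (a sketch assuming at most 8 CPUs, with `sibling[]` standing in for the srcmask function and `l2[]` for the dstmask function):

```c
/* or_masks_demo.c - plain-bitmask analogue of or_cpumasks_related() */
#include <stdio.h>

#define NCPUS 8
static unsigned long sibling[NCPUS];    /* srcmask: per-cpu sibling sets */
static unsigned long l2[NCPUS];         /* dstmask: per-cpu L2 sets */

static void or_masks_related(int i, int j)
{
        unsigned long mask = sibling[j];
        int k;

        for (k = 0; k < NCPUS; k++)
                if (sibling[i] & (1UL << k))
                        l2[k] |= mask;
        if (i == j)
                return;
        mask = sibling[i];
        for (k = 0; k < NCPUS; k++)
                if (sibling[j] & (1UL << k))
                        l2[k] |= mask;
}

int main(void)
{
        sibling[0] = sibling[1] = 0x03; /* CPUs 0,1 are SMT siblings */
        sibling[4] = sibling[5] = 0x30; /* CPUs 4,5 are SMT siblings */

        or_masks_related(0, 0);         /* prime each pair's own relation */
        or_masks_related(4, 4);
        or_masks_related(0, 4);         /* CPUs 0 and 4 share an L2 */

        /* all four CPUs now agree: each l2[] entry is 0x33 */
        printf("l2[0]=%#lx l2[1]=%#lx l2[4]=%#lx l2[5]=%#lx\n",
               l2[0], l2[1], l2[4], l2[5]);
        return 0;
}
```

Note that each pair's own relation is established first (the `i == j` call), which is why the kernel can treat `dstmask` as a superset of `srcmask`.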
```c
/*
 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 *                      property for the CPU device node @dn and stores
 *                      the parsed output in the thread_groups_list
 *                      structure @tglp.
 *
 * @dn: The device node of the CPU device.
 * @tglp: Pointer to a thread group list structure into which the parsed
 *      output of "ibm,thread-groups" is stored.
 *
 * ibm,thread-groups[0..N-1] array defines which group of threads in
 * the CPU-device node can be grouped together based on the property.
 *
 * This array can represent thread groupings for multiple properties.
 *
 * ibm,thread-groups[i + 0] tells us the property based on which the
 * threads are being grouped together. If this value is 1, it implies
 * that the threads in the same group share the L1 and translation
 * caches. If the value is 2, it implies that the threads in the same
 * group share the same L2 cache.
 *
 * ibm,thread-groups[i+1] tells us how many such thread groups exist for the
 * property ibm,thread-groups[i]
 *
 * ibm,thread-groups[i+2] tells us the number of threads in each such
 * group.
 * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
 *
 * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 * the grouping.
 *
 * Example:
 * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
 * This can be decomposed into two consecutive arrays:
 * a) [1,2,4,8,10,12,14,9,11,13,15]
 * b) [2,2,4,8,10,12,14,9,11,13,15]
 *
 * wherein,
 *
 * a) provides information of Property "1" being shared by "2" groups,
 *    each with "4" threads each. The "ibm,ppc-interrupt-server#s" of
 *    the first group is {8,10,12,14} and the
 *    "ibm,ppc-interrupt-server#s" of the second group is
 *    {9,11,13,15}. Property "1" is indicative of the threads in the
 *    group sharing the L1 cache, translation cache and instruction
 *    data flow.
 *
 * b) provides information of Property "2" being shared by "2" groups,
 *    each group with "4" threads. The "ibm,ppc-interrupt-server#s" of
 *    the first group is {8,10,12,14} and the
 *    "ibm,ppc-interrupt-server#s" of the second group is
 *    {9,11,13,15}. Property "2" indicates that the threads in each
 *    group share the L2-cache.
 *
 * Returns 0 on success, -EINVAL if the property does not exist,
 * -ENODATA if property does not have a value, and -EOVERFLOW if the
 * property data isn't large enough.
 */
static int parse_thread_groups(struct device_node *dn,
                               struct thread_groups_list *tglp)
{
        unsigned int property_idx = 0;
        u32 *thread_group_array;
        size_t total_threads;
        int ret = 0, count;
        u32 *thread_list;
        int i = 0;

        count = of_property_count_u32_elems(dn, "ibm,thread-groups");
        thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
        ret = of_property_read_u32_array(dn, "ibm,thread-groups",
                                         thread_group_array, count);
        if (ret)
                goto out_free;

        while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) {
                int j;
                struct thread_groups *tg = &tglp->property_tgs[property_idx++];

                tg->property = thread_group_array[i];
                tg->nr_groups = thread_group_array[i + 1];
                tg->threads_per_group = thread_group_array[i + 2];
                total_threads = tg->nr_groups * tg->threads_per_group;

                thread_list = &thread_group_array[i + 3];

                for (j = 0; j < total_threads; j++)
                        tg->thread_list[j] = thread_list[j];
                i = i + 3 + total_threads;
        }

        tglp->nr_properties = property_idx;

out_free:
        kfree(thread_group_array);
        return ret;
}
```
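The worked example in the comment above can be checked mechanically. This standalone sketch (not kernel code) walks the same example array and prints each property block using exactly the `i += 3 + total` stride of `parse_thread_groups()`:

```c
/* tg_parse_demo.c - walks the example "ibm,thread-groups" array */
#include <stdio.h>

int main(void)
{
        unsigned int a[] = { 1, 2, 4, 8, 10, 12, 14, 9, 11, 13, 15,
                             2, 2, 4, 8, 10, 12, 14, 9, 11, 13, 15 };
        int count = sizeof(a) / sizeof(a[0]);
        int i = 0;

        while (i < count) {
                unsigned int property = a[i];
                unsigned int nr_groups = a[i + 1];
                unsigned int threads_per_group = a[i + 2];
                unsigned int total = nr_groups * threads_per_group;

                printf("property %u: %u groups of %u threads:", property,
                       nr_groups, threads_per_group);
                for (unsigned int j = 0; j < total; j++)
                        printf(" %u", a[i + 3 + j]);
                printf("\n");
                i += 3 + total;
        }
        return 0;
}
```

It prints one line per property: property 1 and property 2, each with 2 groups of 4 threads, {8,10,12,14} followed by {9,11,13,15}.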
```c
/*
 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 *                              that @cpu belongs to.
 *
 * @cpu : The logical CPU whose thread group is being searched.
 * @tg : The thread-group structure of the CPU node which @cpu belongs
 *       to.
 *
 * Returns the index to tg->thread_list that points to the start
 * of the thread_group that @cpu belongs to.
 *
 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 * tg->thread_list.
 */
static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
{
        int hw_cpu_id = get_hard_smp_processor_id(cpu);
        int i, j;

        for (i = 0; i < tg->nr_groups; i++) {
                int group_start = i * tg->threads_per_group;

                for (j = 0; j < tg->threads_per_group; j++) {
                        int idx = group_start + j;

                        if (tg->thread_list[idx] == hw_cpu_id)
                                return group_start;
                }
        }

        return -1;
}
```
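For the example list above, searching for a CPU whose hardware id is 11 lands in the second group, so the returned offset is 4 (the start of {9,11,13,15}). A minimal standalone sketch of the same search (illustrative only):

```c
/* tg_start_demo.c - group-start search over the example thread_list */
#include <stdio.h>

int main(void)
{
        unsigned int thread_list[] = { 8, 10, 12, 14, 9, 11, 13, 15 };
        int nr_groups = 2, threads_per_group = 4;
        int hw_cpu_id = 11;     /* "ibm,ppc-interrupt-server#" of the CPU */

        for (int i = 0; i < nr_groups; i++) {
                int group_start = i * threads_per_group;

                for (int j = 0; j < threads_per_group; j++)
                        if (thread_list[group_start + j] == hw_cpu_id) {
                                printf("group_start = %d\n", group_start);
                                return 0;       /* prints 4 */
                        }
        }
        printf("not found\n");
        return 1;
}
```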
```c
static struct thread_groups *__init get_thread_groups(int cpu,
                                                      int group_property,
                                                      int *err)
{
        struct device_node *dn = of_get_cpu_node(cpu, NULL);
        struct thread_groups_list *cpu_tgl = &tgl[cpu];
        struct thread_groups *tg = NULL;
        int i;
        *err = 0;

        if (!dn) {
                *err = -ENODATA;
                return NULL;
        }

        if (!cpu_tgl->nr_properties) {
                *err = parse_thread_groups(dn, cpu_tgl);
                if (*err)
                        goto out;
        }

        for (i = 0; i < cpu_tgl->nr_properties; i++) {
                if (cpu_tgl->property_tgs[i].property == group_property) {
                        tg = &cpu_tgl->property_tgs[i];
                        break;
                }
        }

        if (!tg)
                *err = -EINVAL;
out:
        of_node_put(dn);
        return tg;
}

static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg,
                                               int cpu, int cpu_group_start)
{
        int first_thread = cpu_first_thread_sibling(cpu);
        int i;

        zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));

        for (i = first_thread; i < first_thread + threads_per_core; i++) {
                int i_group_start = get_cpu_thread_group_start(i, tg);

                if (unlikely(i_group_start == -1)) {
                        WARN_ON_ONCE(1);
                        return -ENODATA;
                }

                if (i_group_start == cpu_group_start)
                        cpumask_set_cpu(i, *mask);
        }

        return 0;
}

static int __init init_thread_group_cache_map(int cpu, int cache_property)
{
        int cpu_group_start = -1, err = 0;
        struct thread_groups *tg = NULL;
        cpumask_var_t *mask = NULL;

        if (cache_property != THREAD_GROUP_SHARE_L1 &&
            cache_property != THREAD_GROUP_SHARE_L2_L3)
                return -EINVAL;

        tg = get_thread_groups(cpu, cache_property, &err);

        if (!tg)
                return err;

        cpu_group_start = get_cpu_thread_group_start(cpu, tg);

        if (unlikely(cpu_group_start == -1)) {
                WARN_ON_ONCE(1);
                return -ENODATA;
        }

        if (cache_property == THREAD_GROUP_SHARE_L1) {
                mask = &per_cpu(thread_group_l1_cache_map, cpu);
                update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
        } else if (cache_property == THREAD_GROUP_SHARE_L2_L3) {
                mask = &per_cpu(thread_group_l2_cache_map, cpu);
                update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
                mask = &per_cpu(thread_group_l3_cache_map, cpu);
                update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
        }

        return 0;
}

static bool shared_caches;

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
        int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

        if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
                printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
                flags |= SD_ASYM_PACKING;
        }
        return flags;
}
#endif

/*
 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
 * since the migrated task remains cache hot. We want to take advantage of this
 * at the scheduler level so an extra topology level is required.
 */
static int powerpc_shared_cache_flags(void)
{
        return SD_SHARE_PKG_RESOURCES;
}

/*
 * We can't just pass cpu_l2_cache_mask() directly because it returns a
 * non-const pointer and the compiler barfs on that.
 */
static const struct cpumask *shared_cache_mask(int cpu)
{
        return per_cpu(cpu_l2_cache_map, cpu);
}

#ifdef CONFIG_SCHED_SMT
static const struct cpumask *smallcore_smt_mask(int cpu)
{
        return cpu_smallcore_mask(cpu);
}
#endif

static struct cpumask *cpu_coregroup_mask(int cpu)
{
        return per_cpu(cpu_coregroup_map, cpu);
}

static bool has_coregroup_support(void)
{
        return coregroup_enabled;
}

static const struct cpumask *cpu_mc_mask(int cpu)
{
        return cpu_coregroup_mask(cpu);
}

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
        { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
        { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
        { cpu_mc_mask, SD_INIT_NAME(MC) },
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};

static int __init init_big_cores(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);

                if (err)
                        return err;

                zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
                                        GFP_KERNEL,
                                        cpu_to_node(cpu));
        }

        has_big_cores = true;

        for_each_possible_cpu(cpu) {
                int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3);

                if (err)
                        return err;
        }

        thread_group_shares_l2 = true;
        thread_group_shares_l3 = true;
        pr_debug("L2/L3 cache only shared by the threads in the small core\n");

        return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

        for_each_possible_cpu(cpu) {
                zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
                zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
                zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
                if (has_coregroup_support())
                        zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
                                                GFP_KERNEL, cpu_to_node(cpu));

#ifdef CONFIG_NUMA
                /*
                 * numa_node_id() works after this.
                 */
                if (cpu_present(cpu)) {
                        set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
                        set_cpu_numa_mem(cpu,
                                local_memory_node(numa_cpu_lookup_table[cpu]));
                }
#endif
        }

        /* Init the cpumasks so the boot CPU is related to itself */
        cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
        cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
        cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

        if (has_coregroup_support())
                cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));

        init_big_cores();
        if (has_big_cores) {
                cpumask_set_cpu(boot_cpuid,
                                cpu_smallcore_mask(boot_cpuid));
        }

        if (cpu_to_chip_id(boot_cpuid) != -1) {
                int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);

                /*
                 * All threads of a core belong to the same core;
                 * chip_id_lookup_table will have one entry per core.
                 * Assumption: if boot_cpuid doesn't have a chip-id, then no
                 * other CPU will have one either.
                 */
                chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL);
                if (chip_id_lookup_table)
                        memset(chip_id_lookup_table, -1, sizeof(int) * idx);
        }

        if (smp_ops && smp_ops->probe)
                smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
        paca_ptrs[boot_cpuid]->__current = current;
#endif
        set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
        current_set[boot_cpuid] = current;
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
        vdso_data->processorCount--;
#endif
        /* Update affinity of all IRQs previously aimed at this CPU */
        irq_migrate_all_off_this_cpu();

        /*
         * Depending on the details of the interrupt controller, it's possible
         * that one of the interrupts we just migrated away from this CPU is
         * actually already pending on this CPU. If we leave it in that state
         * the interrupt will never be EOI'ed, and will never fire again. So
         * temporarily enable interrupts here, to allow any pending interrupt to
         * be received (and EOI'ed), before we take this CPU offline.
         */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();

        return 0;
}

void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (is_cpu_dead(cpu))
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
        per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
        return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
        return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
        return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()         0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
#ifdef CONFIG_PPC64
        paca_ptrs[cpu]->__current = idle;
        paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
                                 THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
        task_thread_info(idle)->cpu = cpu;
        secondary_current = current_set[cpu] = idle;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        const unsigned long boot_spin_ms = 5 * MSEC_PER_SEC;
        const bool booting = system_state < SYSTEM_RUNNING;
        const unsigned long hp_spin_ms = 1;
        unsigned long deadline;
        int rc;
        const unsigned long spin_wait_ms = booting ? boot_spin_ms : hp_spin_ms;

        /*
         * Don't allow secondary threads to come online if inhibited
         */
        if (threads_per_core > 1 && secondaries_inhibited() &&
            cpu_thread_in_subcore(cpu))
                return -EBUSY;

        if (smp_ops == NULL ||
            (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
                return -EINVAL;

        cpu_idle_thread_init(cpu, tidle);

        /*
         * The platform might need to allocate resources prior to bringing
         * up the CPU.
         */
        if (smp_ops->prepare_cpu) {
                rc = smp_ops->prepare_cpu(cpu);
                if (rc)
                        return rc;
        }

        /*
         * Make sure the callin-map entry is 0 (can be left over from a
         * CPU hotplug).
         */
        cpu_callin_map[cpu] = 0;

        /*
         * The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        smp_mb();

        /* wake up cpus */
        DBG("smp: kicking cpu %d\n", cpu);
        rc = smp_ops->kick_cpu(cpu);
        if (rc) {
                pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
                return rc;
        }

        /*
         * At boot time, simply spin on the callin word until the
         * deadline passes.
         *
         * At run time, spin for an optimistic amount of time to avoid
         * sleeping in the common case.
         */
        deadline = jiffies + msecs_to_jiffies(spin_wait_ms);
        spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline));

        if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) {
                const unsigned long sleep_interval_us = 10 * USEC_PER_MSEC;
                const unsigned long sleep_wait_ms = 100 * MSEC_PER_SEC;

                deadline = jiffies + msecs_to_jiffies(sleep_wait_ms);
                while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline))
                        fsleep(sleep_interval_us);
        }

        if (!cpu_callin_map[cpu]) {
                printk(KERN_ERR "Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        DBG("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online & active maps */
        spin_until_cond(cpu_online(cpu));

        return 0;
}

/*
 * Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
        struct device_node *np;
        int id = -1;

        np = of_get_cpu_node(cpu, NULL);
        if (!np)
                goto out;

        id = of_get_cpu_hwid(np, 0);
out:
        of_node_put(np);
        return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
        return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
        return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
```
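These helpers are pure shift arithmetic on the logical CPU number. A quick standalone check (a sketch assuming SMT8, i.e. `threads_shift == 3`):

```c
/* shift_demo.c - core/thread numbering arithmetic, assuming SMT8 */
#include <stdio.h>

static int threads_shift = 3;   /* assumed: 8 threads per core */

static int cpu_core_index_of_thread(int cpu)  { return cpu >> threads_shift; }
static int cpu_first_thread_of_core(int core) { return core << threads_shift; }

int main(void)
{
        printf("%d\n", cpu_core_index_of_thread(13));   /* 1: CPU 13 is in core 1 */
        printf("%d\n", cpu_first_thread_of_core(2));    /* 16: core 2 starts at CPU 16 */
        return 0;
}
```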
1375 | ||
104699c0 | 1376 | /* Must be called when no change can occur to cpu_present_mask, |
440a0857 NL |
1377 | * i.e. during cpu online or offline. |
1378 | */ | |
1379 | static struct device_node *cpu_to_l2cache(int cpu) | |
1380 | { | |
1381 | struct device_node *np; | |
b2ea25b9 | 1382 | struct device_node *cache; |
440a0857 NL |
1383 | |
1384 | if (!cpu_present(cpu)) | |
1385 | return NULL; | |
1386 | ||
1387 | np = of_get_cpu_node(cpu, NULL); | |
1388 | if (np == NULL) | |
1389 | return NULL; | |
1390 | ||
b2ea25b9 NL |
1391 | cache = of_find_next_cache_node(np); |
1392 | ||
440a0857 NL |
1393 | of_node_put(np); |
1394 | ||
b2ea25b9 | 1395 | return cache; |
440a0857 | 1396 | } |
1da177e4 | 1397 | |
84dbf66c | 1398 | static bool update_mask_by_l2(int cpu, cpumask_var_t *mask) |
a8a5356c | 1399 | { |
3ab33d6d | 1400 | struct cpumask *(*submask_fn)(int) = cpu_sibling_mask; |
256f2d4b | 1401 | struct device_node *l2_cache, *np; |
e3d8b67e | 1402 | int i; |
256f2d4b | 1403 | |
966730a6 SD |
1404 | if (has_big_cores) |
1405 | submask_fn = cpu_smallcore_mask; | |
1406 | ||
9538abee GS |
1407 | /* |
1408 | * If the threads in a thread-group share L2 cache, then the | |
1409 | * L2-mask can be obtained from thread_group_l2_cache_map. | |
1410 | */ | |
1411 | if (thread_group_shares_l2) { | |
1412 | cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu)); | |
1413 | ||
1414 | for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) { | |
1415 | if (cpu_online(i)) | |
1416 | set_cpus_related(i, cpu, cpu_l2_cache_mask); | |
1417 | } | |
1418 | ||
1419 | /* Verify that L1-cache siblings are a subset of L2 cache-siblings */ | |
1420 | if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) && | |
1421 | !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) { | |
1422 | pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n", | |
1423 | cpu); | |
1424 | } | |
1425 | ||
1426 | return true; | |
1427 | } | |
1428 | ||
a8a5356c | 1429 | l2_cache = cpu_to_l2cache(cpu); |
84dbf66c SD |
1430 | if (!l2_cache || !*mask) { |
1431 | /* Assume only core siblings share cache with this CPU */ | |
5bf63497 | 1432 | for_each_cpu(i, cpu_sibling_mask(cpu)) |
f6606cfd SD |
1433 | set_cpus_related(cpu, i, cpu_l2_cache_mask); |
1434 | ||
df52f671 | 1435 | return false; |
f6606cfd | 1436 | } |
df52f671 | 1437 | |
84dbf66c | 1438 | cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu)); |
3ab33d6d | 1439 | |
3ab33d6d SD |
1440 | /* Update l2-cache mask with all the CPUs that are part of submask */ |
1441 | or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask); | |
1442 | ||
1443 | /* Skip all CPUs already part of current CPU l2-cache mask */ | |
84dbf66c | 1444 | cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu)); |
3ab33d6d | 1445 | |
84dbf66c | 1446 | for_each_cpu(i, *mask) { |
df52f671 OH |
1447 | /* |
1448 | * when updating the marks the current CPU has not been marked | |
1449 | * online, but we need to update the cache masks | |
1450 | */ | |
256f2d4b | 1451 | np = cpu_to_l2cache(i); |
df52f671 | 1452 | |
3ab33d6d SD |
1453 | /* Skip all CPUs already part of current CPU l2-cache */ |
1454 | if (np == l2_cache) { | |
1455 | or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask); | |
84dbf66c | 1456 | cpumask_andnot(*mask, *mask, submask_fn(i)); |
3ab33d6d | 1457 | } else { |
84dbf66c | 1458 | cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i)); |
3ab33d6d | 1459 | } |
df52f671 | 1460 | |
a8a5356c PM |
1461 | of_node_put(np); |
1462 | } | |
1463 | of_node_put(l2_cache); | |
df52f671 OH |
1464 | |
1465 | return true; | |
1466 | } | |
1467 | ||
1468 | #ifdef CONFIG_HOTPLUG_CPU | |
1469 | static void remove_cpu_from_masks(int cpu) | |
1470 | { | |
70edd4a7 | 1471 | struct cpumask *(*mask_fn)(int) = cpu_sibling_mask; |
df52f671 OH |
1472 | int i; |
1473 | ||
9a245d0e SD |
1474 | unmap_cpu_from_node(cpu); |
1475 | ||
70edd4a7 SD |
1476 | if (shared_caches) |
1477 | mask_fn = cpu_l2_cache_mask; | |
1478 | ||
1479 | for_each_cpu(i, mask_fn(cpu)) { | |
2a636a56 | 1480 | set_cpus_unrelated(cpu, i, cpu_l2_cache_mask); |
df52f671 | 1481 | set_cpus_unrelated(cpu, i, cpu_sibling_mask); |
425752c6 GS |
1482 | if (has_big_cores) |
1483 | set_cpus_unrelated(cpu, i, cpu_smallcore_mask); | |
70edd4a7 SD |
1484 | } |
1485 | ||
c47f892d SD |
1486 | for_each_cpu(i, cpu_core_mask(cpu)) |
1487 | set_cpus_unrelated(cpu, i, cpu_core_mask); | |
1488 | ||
70edd4a7 SD |
1489 | if (has_coregroup_support()) { |
1490 | for_each_cpu(i, cpu_coregroup_mask(cpu)) | |
72730bfc | 1491 | set_cpus_unrelated(cpu, i, cpu_coregroup_mask); |
df52f671 OH |
1492 | } |
1493 | } | |
1494 | #endif | |
1495 | ||
static inline void add_cpu_to_smallcore_masks(int cpu)
{
	int i;

	if (!has_big_cores)
		return;

	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));

	for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_smallcore_mask);
	}
}

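/*
 * Build the coregroup mask for this CPU. Without a preallocated scratch
 * mask, fall back to marking only the submask (sibling or L2) CPUs as
 * related; otherwise walk the remaining online CPUs in the node and pull
 * in those that report the same coregroup id.
 */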
static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	int coregroup_id = cpu_to_coregroup_id(cpu);
	int i;

	if (shared_caches)
		submask_fn = cpu_l2_cache_mask;

	if (!*mask) {
		/* Assume only siblings are part of this CPU's coregroup */
		for_each_cpu(i, submask_fn(cpu))
			set_cpus_related(cpu, i, cpu_coregroup_mask);

		return;
	}

	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));

	/* Update coregroup mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);

	/* Skip all CPUs already part of coregroup mask */
	cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));

	for_each_cpu(i, *mask) {
		/* Skip all CPUs not part of this coregroup */
		if (coregroup_id == cpu_to_coregroup_id(i)) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
			cpumask_andnot(*mask, *mask, submask_fn(i));
		} else {
			cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
		}
	}
}

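/*
 * Called while bringing a CPU online: populate all of its topology masks
 * (sibling, smallcore, L2-cache, coregroup and core). The scratch cpumask
 * allocation can fail in the hotplug path; the L2 and coregroup helpers
 * then fall back to a conservative sibling-based walk.
 */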
static void add_cpu_to_masks(int cpu)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	int first_thread = cpu_first_thread_sibling(cpu);
	cpumask_var_t mask;
	int chip_id = -1;
	bool ret;
	int i;

	/*
	 * This CPU will not be in the online mask yet so we need to manually
	 * add it to its own thread sibling mask.
	 */
	map_cpu_to_node(cpu, cpu_to_node(cpu));
	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
	cpumask_set_cpu(cpu, cpu_core_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++)
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_sibling_mask);

	add_cpu_to_smallcore_masks(cpu);

	/* In CPU-hotplug path, hence use GFP_ATOMIC */
	ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
	update_mask_by_l2(cpu, &mask);

	if (has_coregroup_support())
		update_coregroup_mask(cpu, &mask);

	if (chip_id_lookup_table && ret)
		chip_id = cpu_to_chip_id(cpu);

	if (shared_caches)
		submask_fn = cpu_l2_cache_mask;

	/* Update core_mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);

	/* Skip all CPUs already part of current CPU core mask */
	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));

	/* If chip_id is -1, limit the cpu_core_mask to within the die */
	if (chip_id == -1)
		cpumask_and(mask, mask, cpu_cpu_mask(cpu));

	for_each_cpu(i, mask) {
		if (chip_id == cpu_to_chip_id(i)) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
			cpumask_andnot(mask, mask, submask_fn(i));
		} else {
			cpumask_andnot(mask, mask, cpu_core_mask(i));
		}
	}

	free_cpumask_var(mask);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = raw_smp_processor_id();

	/* PPC64 calls setup_kup() in early_setup_secondary() */
	if (IS_ENABLED(CONFIG_PPC32))
		setup_kup();

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	rcu_cpu_starting(cpu);
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	/* Update topology CPU masks */
	add_cpu_to_masks(cpu);

	/*
	 * Check for any shared caches. Note that this must be done on a
	 * per-core basis because one core in the pair might be disabled.
	 */
	if (!shared_caches) {
		struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
		struct cpumask *mask = cpu_l2_cache_mask(cpu);

		if (has_big_cores)
			sibling_mask = cpu_smallcore_mask;

		if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
			shared_caches = true;
	}

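	/*
	 * Ensure the topology and cache-mask updates above are visible to
	 * other CPUs before this one appears in the online mask.
	 */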
	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	boot_init_stack_canary();

	local_irq_enable();

	/* We can enable ftrace for secondary cpus now */
	this_cpu_enable_ftrace();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

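/*
 * Adjust the scheduler topology levels to match what this machine
 * actually provides: use the small-core mask for the SMT level on
 * big-core systems, alias the MC level to the cache level when
 * coregroups are absent, and merge adjacent levels that ended up with
 * identical masks so the scheduler need not degenerate them itself.
 */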
static void __init fixup_topology(void)
{
	int i;

#ifdef CONFIG_SCHED_SMT
	if (has_big_cores) {
		pr_info("Big cores detected but using small core scheduling\n");
		powerpc_topology[smt_idx].mask = smallcore_smt_mask;
	}
#endif

	if (!has_coregroup_support())
		powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;

	/*
	 * Try to consolidate topology levels here instead of
	 * allowing the scheduler to degenerate them.
	 * - Don't consolidate if the masks are different.
	 * - Don't consolidate if both sd_flags exist and are different.
	 */
	for (i = 1; i <= die_idx; i++) {
		if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
			continue;

		if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
		    powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
			continue;

		if (!powerpc_topology[i - 1].sd_flags)
			powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;

		powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
		powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
#ifdef CONFIG_SCHED_DEBUG
		powerpc_topology[i].name = powerpc_topology[i + 1].name;
#endif
	}
}

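/*
 * Final SMP fixup, run once all present CPUs have been brought up: give
 * the platform a chance to finish boot-CPU setup, dump the NUMA layout,
 * and register the (possibly consolidated) topology with the scheduler.
 */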
void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We are running pinned to the boot CPU, see rest_init().
	 */
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	fixup_topology();
	set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
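/*
 * Take the calling CPU down: let the platform detach it, then drop it
 * from all topology masks. Returns -ENOSYS when the platform provides
 * no cpu_disable hook.
 */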
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	this_cpu_disable_ftrace();

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	remove_cpu_from_masks(cpu);

	return 0;
}

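/* Let the platform reap the given offline CPU, if it needs to. */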
void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

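/*
 * Idle-loop entry for a CPU on its way offline: park the CPU in the
 * platform's cpu_offline_self() hook, and restart secondary bring-up
 * if that hook ever returns.
 */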
void arch_cpu_idle_dead(void)
{
	/*
	 * Disable ftrace on the down path. It will be re-enabled by
	 * start_secondary() via start_secondary_resume() below.
	 */
	this_cpu_disable_ftrace();

	if (smp_ops->cpu_offline_self)
		smp_ops->cpu_offline_self();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif