/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
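
/*
 * For example: on an SMT8 core booted with smt-enabled=2,
 * smt_enabled_at_boot == 2, so threads 0 and 1 of each core pass the
 * checks above while threads 2-7 have cpu_thread_in_core() >= 2 and
 * are inhibited.  With SMT disabled (smt_enabled_at_boot == 0), only
 * thread 0 of each core is brought up.
 */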

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
		return -EINVAL;
	}
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC_CORE)
	if (msg == PPC_MSG_DEBUGGER_BREAK) {
		return 1;
	}
#endif
	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
		virq, smp_ipi_name[msg], err);

	return err;
}
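
#if 0
/*
 * Hypothetical usage sketch, for illustration only: a platform whose
 * interrupt controller provides four or more hardware IPIs could map
 * one virq per message and register them in a loop.  The "virqs"
 * array below is an assumed platform-provided mapping, not part of
 * this file.
 */
static void example_request_ipis(unsigned int *virqs)
{
	int msg;

	for (msg = PPC_MSG_CALL_FUNCTION; msg <= PPC_MSG_DEBUGGER_BREAK; msg++)
		smp_request_message_ipi(virqs[msg], msg);
}
#endif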

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
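
#if 0
/*
 * Illustration (assumed helper, not in the original source): the byte
 * store done by smp_muxed_ipi_set_message() must line up with the bit
 * tested via IPI_MESSAGE().  On either endianness, setting byte N of
 * the long is equivalent to OR-ing in IPI_MESSAGE(N):
 */
static void example_ipi_message_layout(void)
{
	union {
		long word;
		char bytes[sizeof(long)];
	} u = { .word = 0 };

	u.bytes[PPC_MSG_RESCHEDULE] = 1;	/* what set_message() does */
	BUG_ON(!(u.word & IPI_MESSAGE(PPC_MSG_RESCHEDULE)));
}
#endif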

irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = this_cpu_ptr(&ipi_message);
	unsigned long all;

	mb();	/* order any irq clear */

	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
			debug_ipi_action(0, NULL);
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
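
#if 0
/*
 * Sketch of a muxed-IPI consumer (hypothetical handler, for
 * illustration only; a real one would live inside the
 * CONFIG_PPC_SMP_MUXED_IPI block above): a platform using the muxed
 * scheme typically acks/clears its single doorbell interrupt and then
 * lets smp_ipi_demux() fan the accumulated message bits out.
 */
static irqreturn_t example_doorbell_handler(int irq, void *data)
{
	/* platform-specific clearing of the doorbell would go here */
	return smp_ipi_demux();
}
#endif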

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up yet, but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/* Give the CPU time to drain in-flight ones */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}
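
/*
 * Typical offline/online sequence using the helpers above: the dying
 * CPU calls generic_set_cpu_dead() just before it stops,
 * generic_cpu_die() polls is_cpu_dead() from another CPU for up to ten
 * seconds (100 iterations of msleep(100)), and a later kick_cpu()
 * marks the CPU CPU_UP_PREPARE via generic_set_cpu_up() so the
 * spinning CPU can see generic_check_cpu_restart() succeed and come
 * back online.
 */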

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU.
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure callin-map entry is 0 (can be leftover from a
	 * previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}
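
/*
 * Note on the timeouts above: both callin wait loops bound the wait at
 * roughly five seconds - 50000 iterations of udelay(100) during boot,
 * and 5000 iterations of msleep(1) in the hotplug case (msleep(1) can
 * sleep longer than 1ms, so the hotplug bound is looser in practice).
 */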

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (reg == NULL)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
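
/*
 * For example, with SMT4 (threads_shift == 2, threads_per_core == 4):
 * cpu_core_index_of_thread(6) == 6 >> 2 == 1, and
 * cpu_first_thread_of_core(1) == 1 << 2 == 4, i.e. logical CPUs 4-7
 * are the threads of core 1.
 */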

static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip = -1, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}
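
/*
 * In short: CPUs end up in the same cpu_core_mask if the device tree
 * gives their cpu nodes matching ibm,chip-id properties; failing that,
 * we fall back to treating CPUs that share an L2 cache node as core
 * siblings.
 */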

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}

	return flags;
}
#endif

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
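
/*
 * With this table, the scheduler builds an SMT domain spanning the
 * hardware threads of each core (powerpc_smt_flags() decides whether
 * SD_ASYM_PACKING is set) and a DIE domain spanning all CPUs of the
 * node; any NUMA domains above that come from the generic topology
 * code.
 */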

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime,
	 * so we pin ourselves down to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, &current->cpus_allowed);
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}
#endif