/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @fail:	State to fail deliberately (fault injection via sysfs)
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @node:	For multi-instance, the instance to operate on
 * @last:	For multi-instance rollback, remember how far we got
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	bool			booted_once;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};
static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);

static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			cant_stop;
	bool			multi_instance;
};
static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail.
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	complete(done);
}

/*
 * The former STARTING/DYING states run with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
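
/*
 * Illustrative sketch only (not part of this file): a hypothetical caller
 * bracketing a cpu map update with the API described above.
 */
static __maybe_unused void example_mark_cpu_present(unsigned int cpu)
{
	cpu_maps_update_begin();
	set_cpu_present(cpu, true);	/* the update is serialized now */
	cpu_maps_update_done();
}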
/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

int cpus_read_trylock(void)
{
	return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
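
/*
 * Illustrative sketch only: the canonical read-side pattern. Holding the
 * hotplug read lock keeps the online mask stable across the walk.
 */
static __maybe_unused void example_walk_online_cpus(void)
{
	unsigned int cpu;

	cpus_read_lock();
	for_each_online_cpu(cpu)
		pr_debug("cpu%u is online\n", cpu);
	cpus_read_unlock();
}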
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
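
/*
 * Illustrative sketch only (hypothetical callback): keeping the CPU set
 * stable across a long operation without holding a lock the whole time.
 * cpu_up()/cpu_down() fail with -EBUSY while hotplug is disabled.
 */
static __maybe_unused int example_stable_cpus_op(int (*op)(void))
{
	int ret;

	cpu_hotplug_disable();
	ret = op();
	cpu_hotplug_enable();
	return ret;
}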
#endif	/* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
EXPORT_SYMBOL_GPL(cpu_smt_control);

static bool cpu_smt_available __read_mostly;

void __init cpu_smt_disable(bool force)
{
	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
		cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code before non boot CPUs
 * are brought up.
 */
void __init cpu_smt_check_topology_early(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

/*
 * If SMT was disabled by BIOS, detect it here, after the CPUs have been
 * brought online. This ensures the smt/l1tf sysfs entries are consistent
 * with reality. cpu_smt_available is set to true during the bringup of non
 * boot CPUs when a SMT sibling is detected. Note, this may overwrite
 * cpu_smt_control's previous setting.
 */
void __init cpu_smt_check_topology(void)
{
	if (!cpu_smt_available)
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);

static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * If the CPU is not a 'primary' thread and the booted_once bit is
	 * set then the processor has SMT support. Store this information
	 * for the late check of SMT support in cpu_smt_check_topology().
	 */
	if (per_cpu(cpuhp_state, cpu).booted_once)
		cpu_smt_available = true;

	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
	 * core will shutdown the machine.
	 */
	return !per_cpu(cpuhp_state, cpu).booted_once;
}
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;
	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->state < prev_state)
			st->state++;
		else if (st->state > prev_state)
			st->state--;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}
static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE(!cpu_online(cpu)))
		return -ECANCELED;

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}
static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}
/*
 * Hotplug state machine related functions
 */

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
	return ret;
}
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}
/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	if (WARN_ON_ONCE(!st->should_run))
		return;

	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

	cpuhp_lock_release(bringup);

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	/* Empty acquire/release pairs teach lockdep the up/down dependencies */
	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}
static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	/* Empty acquire/release pairs teach lockdep the up/down dependencies */
	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
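
/*
 * Note: the expected callers are architecture CPU teardown paths (e.g. an
 * arch's __cpu_disable()/__cpu_die() handling), invoked once the CPU has
 * been marked offline.
 */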
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}
static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++)
		cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	return ret;
}
static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	if (cpu_hotplug_disabled)
		return -EBUSY;
	return _cpu_down(cpu, 0, target);
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);

#else
#define takedown_cpu		NULL
#endif	/* CONFIG_HOTPLUG_CPU */
/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	st->booted_once = true;
	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		/*
		 * STARTING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
}
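
/*
 * Illustrative sketch only (hypothetical architecture code): the shape of a
 * secondary CPU startup path honoring the ordering rules documented above.
 */
static __maybe_unused void example_secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();

	notify_cpu_starting(cpu);	/* IRQs still disabled here */
	set_cpu_online(cpu, true);	/* lets __cpu_up() proceed */
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}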
/*
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	return ret;
}
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	if (!cpu_smt_allowed(cpu)) {
		err = -EPERM;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
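
/*
 * Illustrative sketch only (hypothetical in-kernel user, requires
 * CONFIG_HOTPLUG_CPU): taking a CPU down and back up. Most users should
 * prefer device_offline()/device_online(), which also keep the sysfs
 * 'online' attribute consistent.
 */
static __maybe_unused int example_cycle_cpu(unsigned int cpu)
{
	int ret = cpu_down(cpu);

	return ret ? ret : cpu_up(cpu);
}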
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */
int __boot_cpu_id;

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_hp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_PREPARE] = {
		.name			= "timers:prepare",
		.startup.single		= timers_prepare_cpu,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/*
	 * Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= smpboot_park_threads,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WATCHDOG_ONLINE] = {
		.name			= "lockup_detector:online",
		.startup.single		= lockup_detector_online_cpu,
		.teardown.single	= lockup_detector_offline_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

/*
 * Returns a free slot for dynamic state assignment from the requested range.
 * The states are protected by the cpuhp_state_mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
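
/*
 * Illustrative sketch only (hypothetical driver): a multi-instance state
 * embeds a hlist_node per instance and registers it against a state that
 * was set up with multi_instance = true.
 */
struct example_instance {
	struct hlist_node node;
	/* per-instance driver data */
};

static __maybe_unused int example_add_instance(enum cpuhp_state state,
					       struct example_instance *inst)
{
	/* Runs the startup.multi callback on all CPUs at or past @state */
	return cpuhp_state_add_instance(state, &inst->node);
}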
/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the step
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
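
/*
 * Illustrative sketch only (hypothetical module): the common registration
 * pattern via the cpuhp_setup_state() wrapper. For CPUHP_AP_ONLINE_DYN the
 * return value is the dynamically allocated state number.
 */
static int example_online(unsigned int cpu)
{
	return 0;	/* per-CPU setup; an error triggers rollback */
}

static int example_prepare_down(unsigned int cpu)
{
	return 0;	/* teardown; not allowed to fail on removal */
}

static __maybe_unused int example_register(void)
{
	int state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				      example_online, example_prepare_down);

	return state < 0 ? state : 0;
}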
int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);

/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
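
/*
 * Illustrative sketch only: the matching teardown for the registration
 * sketch above, assuming 'state' holds the value cpuhp_setup_state()
 * returned. The teardown callback runs on every CPU at or past the state.
 */
static __maybe_unused void example_unregister(int state)
{
	cpuhp_remove_state(state);
}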
1846 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1847 static ssize_t show_cpuhp_state(struct device *dev,
1848 struct device_attribute *attr, char *buf)
1850 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1852 return sprintf(buf, "%d\n", st->state);
1854 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1856 static ssize_t write_cpuhp_target(struct device *dev,
1857 struct device_attribute *attr,
1858 const char *buf, size_t count)
1860 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1861 struct cpuhp_step *sp;
1864 ret = kstrtoint(buf, 10, &target);
1868 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1869 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1872 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1876 ret = lock_device_hotplug_sysfs();
1880 mutex_lock(&cpuhp_state_mutex);
1881 sp = cpuhp_get_step(target);
1882 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1883 mutex_unlock(&cpuhp_state_mutex);
1887 if (st->state < target)
1888 ret = do_cpu_up(dev->id, target);
1890 ret = do_cpu_down(dev->id, target);
1892 unlock_device_hotplug();
1893 return ret ? ret : count;
1896 static ssize_t show_cpuhp_target(struct device *dev,
1897 struct device_attribute *attr, char *buf)
1899 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1901 return sprintf(buf, "%d\n", st->target);
1903 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
1906 static ssize_t write_cpuhp_fail(struct device *dev,
1907 struct device_attribute *attr,
1908 const char *buf, size_t count)
1910 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1911 struct cpuhp_step *sp;
1914 ret = kstrtoint(buf, 10, &fail);
1919 * Cannot fail STARTING/DYING callbacks.
1921 if (cpuhp_is_atomic_state(fail))
1925 * Cannot fail anything that doesn't have callbacks.
1927 mutex_lock(&cpuhp_state_mutex);
1928 sp = cpuhp_get_step(fail);
1929 if (!sp->startup.single && !sp->teardown.single)
1931 mutex_unlock(&cpuhp_state_mutex);
1940 static ssize_t show_cpuhp_fail(struct device *dev,
1941 struct device_attribute *attr, char *buf)
1943 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1945 return sprintf(buf, "%d\n", st->fail);
1948 static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
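
/*
 * Example use of the fault injection attribute from user space (sketch;
 * the state number 177 is arbitrary):
 *
 *	# echo 177 > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	# echo 0 > /sys/devices/system/cpu/cpu1/online
 *
 * The offline attempt then fails with -EAGAIN when state 177 is reached
 * and the state machine rolls the CPU back to the online state.
 */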
static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};
#ifdef CONFIG_HOTPLUG_SMT

static const char *smt_states[] = {
	[CPU_SMT_ENABLED]		= "on",
	[CPU_SMT_DISABLED]		= "off",
	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
};

static ssize_t
show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
}

static void cpuhp_offline_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = true;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}

static void cpuhp_online_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = false;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	for_each_online_cpu(cpu) {
		if (topology_is_primary_thread(cpu))
			continue;
		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (ret)
			break;
		/*
		 * As this needs to hold the cpu maps lock it's impossible
		 * to call device_offline() because that ends up calling
		 * cpu_down() which takes cpu maps lock. cpu maps lock
		 * needs to be held as this might race against in kernel
		 * abusers of the hotplug machinery (thermal management).
		 *
		 * So nothing would update device:offline state. That would
		 * leave the sysfs entry stale and prevent onlining after
		 * smt control has been changed to 'off' again. This is
		 * called under the sysfs hotplug lock, so it is properly
		 * serialized against the regular offline usage.
		 */
		cpuhp_offline_cpu_device(cpu);
	}
	if (!ret) {
		cpu_smt_control = ctrlval;
		arch_smt_update();
	}
	cpu_maps_update_done();
	return ret;
}

static int cpuhp_smt_enable(void)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	cpu_smt_control = CPU_SMT_ENABLED;
	arch_smt_update();
	for_each_present_cpu(cpu) {
		/* Skip online CPUs and CPUs on offline nodes */
		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
			continue;
		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
		if (ret)
			break;
		/* See comment in cpuhp_smt_disable() */
		cpuhp_online_cpu_device(cpu);
	}
	cpu_maps_update_done();
	return ret;
}

static ssize_t
store_smt_control(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	int ctrlval, ret;

	if (sysfs_streq(buf, "on"))
		ctrlval = CPU_SMT_ENABLED;
	else if (sysfs_streq(buf, "off"))
		ctrlval = CPU_SMT_DISABLED;
	else if (sysfs_streq(buf, "forceoff"))
		ctrlval = CPU_SMT_FORCE_DISABLED;
	else
		return -EINVAL;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
		return -EPERM;

	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return -ENODEV;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (ctrlval != cpu_smt_control) {
		switch (ctrlval) {
		case CPU_SMT_ENABLED:
			ret = cpuhp_smt_enable();
			break;
		case CPU_SMT_DISABLED:
		case CPU_SMT_FORCE_DISABLED:
			ret = cpuhp_smt_disable(ctrlval);
			break;
		}
	}

	unlock_device_hotplug();
	return ret ? ret : count;
}
static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);

static ssize_t
show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool active = topology_max_smt_threads() > 1;

	return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
}
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);

static struct attribute *cpuhp_smt_attrs[] = {
	&dev_attr_control.attr,
	&dev_attr_active.attr,
	NULL
};

static const struct attribute_group cpuhp_smt_attr_group = {
	.attrs = cpuhp_smt_attrs,
	.name = "smt",
};

static int __init cpu_smt_state_init(void)
{
	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
				  &cpuhp_smt_attr_group);
}

#else
static inline int cpu_smt_state_init(void) { return 0; }
#endif
static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = cpu_smt_state_init();
	if (ret)
		return ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents the single-bit values 1<<nr for all NR_CPUS bit positions.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
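
/*
 * Illustrative sketch only: cpumask_of() resolves to a pointer into
 * cpu_bit_bitmap, so a single-CPU mask needs no per-call storage.
 */
static __maybe_unused void example_single_cpu_mask(void)
{
	const struct cpumask *mask = cpumask_of(3);	/* only bit 3 set */

	WARN_ON(!cpumask_test_cpu(3, mask));
}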
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
	this_cpu_write(cpuhp_state.booted_once, true);
#endif
	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}