torture: Replace cpu_up/down() with add/remove_cpu()

[linux-block.git] / kernel / cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#ifdef CONFIG_SMP
cpumask_t cpus_booted_once_mask;
#endif

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);


static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}

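/*
 * Editor's note (illustrative, not part of the original file): the st->fail
 * check at the top of cpuhp_invoke_callback() is the error injection hook.
 * With the hotplug sysfs interface, writing a state number to
 * /sys/devices/system/cpu/cpuN/hotplug/fail is believed to make the next
 * transition through that state return -EAGAIN here, which exercises the
 * rollback paths below.
 */
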
#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	complete(done);
}

/*
 * The former STARTING/DYING states, which run with IRQs disabled and must
 * not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

int cpus_read_trylock(void)
{
	return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	/*
	 * We can't have hotplug operations before userspace starts running,
	 * and some init codepaths will knowingly not take the hotplug lock.
	 * This is all valid, so mute lockdep until it makes sense to report
	 * unheld locks.
	 */
	if (system_state < SYSTEM_RUNNING)
		return;

	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

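/*
 * Illustrative sketch (not part of the original file): a typical reader of
 * the hotplug lock. Code that walks the online mask and must not race with
 * a CPU coming or going brackets the walk with cpus_read_lock()/unlock().
 * The function below is hypothetical.
 */
static __maybe_unused unsigned int example_count_online_cpus_stable(void)
{
	unsigned int cpu, cnt = 0;

	cpus_read_lock();		/* Block concurrent hotplug. */
	for_each_online_cpu(cpu)	/* Mask cannot change under us. */
		cnt++;
	cpus_read_unlock();

	return cnt;
}
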
static void lockdep_acquire_cpus_lock(void)
{
	rwsem_acquire(&cpu_hotplug_lock.rw_sem.dep_map, 0, 0, _THIS_IP_);
}

static void lockdep_release_cpus_lock(void)
{
	rwsem_release(&cpu_hotplug_lock.rw_sem.dep_map, _THIS_IP_);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);

#else

static void lockdep_acquire_cpus_lock(void)
{
}

static void lockdep_release_cpus_lock(void)
{
}

#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;

void __init cpu_smt_disable(bool force)
{
	if (!cpu_smt_possible())
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		pr_info("SMT: disabled\n");
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_check_topology(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);

static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
	 * core will shut down the machine.
	 */
	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}

/* Returns true if SMT is not supported or forcefully (irreversibly) disabled */
bool cpu_smt_possible(void)
{
	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
}
EXPORT_SYMBOL_GPL(cpu_smt_possible);
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif

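/*
 * Illustrative sketch (not part of the original file): cpu_smt_possible()
 * lets a subsystem refuse an SMT-dependent feature when SMT was force
 * disabled or is unsupported, since neither condition can change at
 * runtime. The function below is hypothetical.
 */
static __maybe_unused int example_enable_smt_feature(void)
{
	if (!cpu_smt_possible())
		return -EOPNOTSUPP;	/* SMT can never come back up. */
	return 0;
}
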
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the hotplug thread of the target cpu */
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

/*
 * Hotplug state machine related functions
 */

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}

static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return true;
	/*
	 * When CPU hotplug is disabled, then taking the CPU down is not
	 * possible because takedown_cpu() and the architecture and
	 * subsystem specific mechanisms are not available. So the CPU
	 * which would be completely unplugged again needs to stay around
	 * in the current state.
	 */
	return st->state <= CPUHP_BRINGUP_CPU;
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			if (can_rollback_cpu(st)) {
				st->target = prev_state;
				undo_cpu_up(cpu, st);
			}
			break;
		}
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	if (WARN_ON_ONCE(!st->should_run))
		return;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	/*
	 * The BP holds the hotplug lock, but we're now running on the AP,
	 * ensure that anybody asserting the lock is held, will actually find
	 * it so.
	 */
	lockdep_acquire_cpus_lock();
	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

	cpuhp_lock_release(bringup);
	lockdep_release_cpus_lock();

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Remove CPU from timer broadcasting */
	tick_offline_cpu(cpu);
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++)
		cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			if (st->state < prev_state)
				undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	arch_smt_update();
	return ret;
}

static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	if (cpu_hotplug_disabled)
		return -EBUSY;
	return _cpu_down(cpu, 0, target);
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);

int remove_cpu(unsigned int cpu)
{
	int ret;

	lock_device_hotplug();
	ret = device_offline(get_cpu_device(cpu));
	unlock_device_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(remove_cpu);

void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
{
	unsigned int cpu;
	int error;

	cpu_maps_update_begin();

	/*
	 * Make certain the cpu I'm about to reboot on is online.
	 *
	 * This is in line with what migrate_to_reboot_cpu() already does.
	 */
	if (!cpu_online(primary_cpu))
		primary_cpu = cpumask_first(cpu_online_mask);

	for_each_online_cpu(cpu) {
		if (cpu == primary_cpu)
			continue;

		error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (error) {
			pr_err("Failed to offline CPU%d - error=%d\n",
				cpu, error);
			break;
		}
	}

	/*
	 * Ensure all but the reboot CPU are offline.
	 */
	BUG_ON(num_online_cpus() > 1);

	/*
	 * Make sure the CPUs won't be enabled by someone else after this
	 * point. Kexec will reboot to a new kernel shortly resetting
	 * everything along the way.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
}

#else
#define takedown_cpu		NULL
#endif /*CONFIG_HOTPLUG_CPU*/

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		/*
		 * STARTING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
}

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * hotplug thread of the upcoming CPU up and then delegates the rest of the
 * online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	/*
	 * Unpark the stopper thread before we start the idle loop (and start
	 * scheduling); this ensures the stopper task is always available.
	 */
	stop_machine_unpark(smp_processor_id());

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	arch_smt_update();
	return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	if (!cpu_smt_allowed(cpu)) {
		err = -EPERM;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);

int add_cpu(unsigned int cpu)
{
	int ret;

	lock_device_hotplug();
	ret = device_online(get_cpu_device(cpu));
	unlock_device_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(add_cpu);

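/*
 * Illustrative sketch (not part of the original file): what the commit in
 * the subject line does on the torture-test side. Where a test used to call
 * cpu_up()/cpu_down() directly, it now goes through the device layer via
 * add_cpu()/remove_cpu(), keeping the sysfs "online" attribute coherent.
 * The helper below is a hypothetical, simplified stand-in for the torture
 * code.
 */
static __maybe_unused int example_torture_offline_then_online(unsigned int cpu)
{
	int ret;

	ret = remove_cpu(cpu);		/* was: cpu_down(cpu) */
	if (ret)
		return ret;

	return add_cpu(cpu);		/* was: cpu_up(cpu) */
}
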
/**
 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
 * @sleep_cpu: The cpu we hibernated on and should be brought up.
 *
 * On some architectures like arm64, we can hibernate on any CPU, but on
 * wake up the CPU we hibernated on might be offline as a side effect of
 * using maxcpus= for example.
 */
int bringup_hibernate_cpu(unsigned int sleep_cpu)
{
	int ret;

	if (!cpu_online(sleep_cpu)) {
		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
		ret = cpu_up(sleep_cpu);
		if (ret) {
			pr_err("Failed to bring hibernate-CPU up!\n");
			return ret;
		}
	}
	return 0;
}

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (primary == -1) {
		primary = cpumask_first(cpu_online_mask);
		if (!housekeeping_cpu(primary, HK_FLAG_TIMER))
			primary = housekeeping_any_cpu(HK_FLAG_TIMER);
	} else {
		if (!cpu_online(primary))
			primary = cpumask_first(cpu_online_mask);
	}

	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;

		if (pm_wakeup_pending()) {
			pr_info("Wakeup pending. Abort CPU freeze\n");
			error = -EBUSY;
			break;
		}

		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_hp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise an RCU stall occurs.
	 */
	[CPUHP_TIMERS_PREPARE] = {
		.name			= "timers:prepare",
		.startup.single		= timers_prepare_cpu,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/*
	 * Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/*
	 * Handled on the control processor until the plugged processor
	 * manages this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= smpboot_park_threads,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WATCHDOG_ONLINE] = {
		.name			= "lockup_detector:online",
		.startup.single		= lockup_detector_online_cpu,
		.teardown.single	= lockup_detector_offline_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};

/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

/*
 * Returns a free slot for dynamic state assignment in the requested range.
 * The states are protected by the cpuhp_slot_states mutex and an empty slot
 * is identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}

static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}

9805c673
TG
1763int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1764 struct hlist_node *node,
1765 bool invoke)
cf392d10
TG
1766{
1767 struct cpuhp_step *sp;
1768 int cpu;
1769 int ret;
1770
9805c673
TG
1771 lockdep_assert_cpus_held();
1772
cf392d10
TG
1773 sp = cpuhp_get_step(state);
1774 if (sp->multi_instance == false)
1775 return -EINVAL;
1776
dc434e05 1777 mutex_lock(&cpuhp_state_mutex);
cf392d10 1778
3c1627e9 1779 if (!invoke || !sp->startup.multi)
cf392d10
TG
1780 goto add_node;
1781
1782 /*
1783 * Try to call the startup callback for each present cpu
1784 * depending on the hotplug state of the cpu.
1785 */
1786 for_each_present_cpu(cpu) {
1787 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1788 int cpustate = st->state;
1789
1790 if (cpustate < state)
1791 continue;
1792
1793 ret = cpuhp_issue_call(cpu, state, true, node);
1794 if (ret) {
3c1627e9 1795 if (sp->teardown.multi)
cf392d10 1796 cpuhp_rollback_install(cpu, state, node);
dc434e05 1797 goto unlock;
cf392d10
TG
1798 }
1799 }
1800add_node:
1801 ret = 0;
cf392d10 1802 hlist_add_head(node, &sp->list);
dc434e05 1803unlock:
cf392d10 1804 mutex_unlock(&cpuhp_state_mutex);
9805c673
TG
1805 return ret;
1806}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
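
/*
 * Illustrative sketch (compiled out, not part of this file): a typical
 * consumer reaches __cpuhp_state_add_instance() through the wrappers
 * cpuhp_setup_state_multi() and cpuhp_state_add_instance() from
 * <linux/cpuhotplug.h>. All foo_* names below are hypothetical.
 */
#if 0	/* example only */
struct foo_ctx {
	struct hlist_node node;		/* linked onto cpuhp_step::list */
	int id;
};

static enum cpuhp_state foo_state;
static struct foo_ctx foo0 = { .id = 0 };

static int foo_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct foo_ctx *ctx = hlist_entry(node, struct foo_ctx, node);

	pr_info("foo%d: cpu %u online\n", ctx->id, cpu);
	return 0;
}

static int foo_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct foo_ctx *ctx = hlist_entry(node, struct foo_ctx, node);

	pr_info("foo%d: cpu %u going down\n", ctx->id, cpu);
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	/* Reserve a dynamic state; the callbacks run once per instance */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "foo:online",
				      foo_cpu_online, foo_cpu_offline);
	if (ret < 0)
		return ret;
	foo_state = ret;

	/* Invokes foo_cpu_online() on every cpu already >= foo_state */
	return cpuhp_state_add_instance(foo_state, &foo0.node);
}
#endif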

/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the step
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
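
/*
 * Illustrative sketch (compiled out): the common single-callback case via
 * cpuhp_setup_state() from <linux/cpuhotplug.h>, a thin wrapper around
 * __cpuhp_setup_state() with invoke=true. Requesting CPUHP_AP_ONLINE_DYN
 * yields a dynamically allocated state as the positive return value.
 * All bar_* names are hypothetical.
 */
#if 0	/* example only */
static enum cpuhp_state bar_state;

static int bar_online(unsigned int cpu)
{
	pr_info("bar: cpu %u is up\n", cpu);
	return 0;
}

static int bar_offline(unsigned int cpu)
{
	pr_info("bar: cpu %u is going down\n", cpu);
	return 0;
}

static int __init bar_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "bar:online",
				bar_online, bar_offline);
	if (ret < 0)
		return ret;
	bar_state = ret;	/* keep it for cpuhp_remove_state() later */
	return 0;
}
#endif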

int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
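
/*
 * Illustrative continuation of the foo_* sketch above (compiled out):
 * tearing an instance back out. With invoke=true the wrapper
 * cpuhp_state_remove_instance() runs foo_cpu_offline() on every cpu
 * still >= foo_state before unhooking the node;
 * cpuhp_remove_multi_state() then releases the state itself.
 */
#if 0	/* example only */
static void __exit foo_exit(void)
{
	cpuhp_state_remove_instance(foo_state, &foo0.node);
	cpuhp_remove_multi_state(foo_state);
}
#endif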

/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
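
/*
 * Illustrative continuation of the bar_* sketch above (compiled out):
 * cpuhp_remove_state() maps to __cpuhp_remove_state() with invoke=true,
 * so bar_offline() runs on all cpus still >= bar_state; the _nocalls()
 * variant would skip the callbacks.
 */
#if 0	/* example only */
static void __exit bar_exit(void)
{
	cpuhp_remove_state(bar_state);
}
#endif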

#ifdef CONFIG_HOTPLUG_SMT
static void cpuhp_offline_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = true;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}

static void cpuhp_online_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = false;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}

int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	for_each_online_cpu(cpu) {
		if (topology_is_primary_thread(cpu))
			continue;
		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (ret)
			break;
		/*
		 * As this needs to hold the cpu maps lock it's impossible
		 * to call device_offline() because that ends up calling
		 * cpu_down() which takes cpu maps lock. cpu maps lock
		 * needs to be held as this might race against in-kernel
		 * abusers of the hotplug machinery (thermal management).
		 *
		 * So nothing would update device:offline state. That would
		 * leave the sysfs entry stale and prevent onlining after
		 * smt control has been changed to 'off' again. This is
		 * called under the sysfs hotplug lock, so it is properly
		 * serialized against the regular offline usage.
		 */
		cpuhp_offline_cpu_device(cpu);
	}
	if (!ret)
		cpu_smt_control = ctrlval;
	cpu_maps_update_done();
	return ret;
}

int cpuhp_smt_enable(void)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	cpu_smt_control = CPU_SMT_ENABLED;
	for_each_present_cpu(cpu) {
		/* Skip online CPUs and CPUs on offline nodes */
		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
			continue;
		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
		if (ret)
			break;
		/* See comment in cpuhp_smt_disable() */
		cpuhp_online_cpu_device(cpu);
	}
	cpu_maps_update_done();
	return ret;
}
#endif

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
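
/*
 * Usage note (documentation only): with CONFIG_SYSFS and CONFIG_HOTPLUG_CPU
 * these attributes appear per cpu, e.g.:
 *
 *	# cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target	# offline cpu1
 *
 * Intermediate targets other than CPUHP_OFFLINE/CPUHP_ONLINE are only
 * accepted when CONFIG_CPU_HOTPLUG_STATE_CONTROL is enabled.
 */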

static ssize_t write_cpuhp_fail(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
		return -EINVAL;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}

static ssize_t show_cpuhp_fail(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
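
/*
 * Usage note (documentation only): "fail" arms fault injection for
 * hotplug testing. Writing a state number makes the next transition
 * through that state fail, exercising the rollback paths. The state
 * number below is illustrative; valid values are listed in
 * /sys/devices/system/cpu/hotplug/states:
 *
 *	# echo 123 > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 */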

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
	NULL
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
	NULL
};

#ifdef CONFIG_HOTPLUG_SMT

static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	int ctrlval, ret;

	if (sysfs_streq(buf, "on"))
		ctrlval = CPU_SMT_ENABLED;
	else if (sysfs_streq(buf, "off"))
		ctrlval = CPU_SMT_DISABLED;
	else if (sysfs_streq(buf, "forceoff"))
		ctrlval = CPU_SMT_FORCE_DISABLED;
	else
		return -EINVAL;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
		return -EPERM;

	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return -ENODEV;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (ctrlval != cpu_smt_control) {
		switch (ctrlval) {
		case CPU_SMT_ENABLED:
			ret = cpuhp_smt_enable();
			break;
		case CPU_SMT_DISABLED:
		case CPU_SMT_FORCE_DISABLED:
			ret = cpuhp_smt_disable(ctrlval);
			break;
		}
	}

	unlock_device_hotplug();
	return ret ? ret : count;
}

#else /* !CONFIG_HOTPLUG_SMT */
static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	return -ENODEV;
}
#endif /* CONFIG_HOTPLUG_SMT */

static const char *smt_states[] = {
	[CPU_SMT_ENABLED]		= "on",
	[CPU_SMT_DISABLED]		= "off",
	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
	[CPU_SMT_NOT_IMPLEMENTED]	= "notimplemented",
};

static ssize_t
show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
{
	const char *state = smt_states[cpu_smt_control];

	return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
}

static ssize_t
store_smt_control(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	return __store_smt_control(dev, attr, buf, count);
}
static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);

static ssize_t
show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
}
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);

static struct attribute *cpuhp_smt_attrs[] = {
	&dev_attr_control.attr,
	&dev_attr_active.attr,
	NULL
};

static const struct attribute_group cpuhp_smt_attr_group = {
	.attrs = cpuhp_smt_attrs,
	.name = "smt",
	NULL
};
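
/*
 * Usage note (documentation only): with CONFIG_HOTPLUG_SMT the group is
 * exposed under /sys/devices/system/cpu/smt/, e.g.:
 *
 *	# cat /sys/devices/system/cpu/smt/control
 *	on
 *	# echo off > /sys/devices/system/cpu/smt/control
 *	# cat /sys/devices/system/cpu/smt/active
 *	0
 */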

static int __init cpu_smt_sysfs_init(void)
{
	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
				  &cpuhp_smt_attr_group);
}

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = cpu_smt_sysfs_init();
	if (ret)
		return ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS-bit binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
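
/*
 * Worked example (illustrative): get_cpu_mask() in <linux/cpumask.h>
 * computes
 *
 *	p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *
 * For cpu = 70 on a 64-bit build: row 1 + 70 % 64 = 7 has bit 6 set in
 * its first word; backing the pointer up by 70 / 64 = 1 word makes that
 * same stored bit read back as bit 70 of the returned mask. One static
 * table thus serves every possible single-CPU mask.
 */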

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

atomic_t __num_online_cpus __read_mostly;
EXPORT_SYMBOL(__num_online_cpus);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

void set_cpu_online(unsigned int cpu, bool online)
{
	/*
	 * atomic_inc/dec() is required to handle the horrid abuse of this
	 * function by the reboot and kexec code which invoke it from
	 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
	 * regular CPU hotplug is properly serialized.
	 *
	 * Note that the fact that __num_online_cpus is of type atomic_t
	 * does not protect readers which are not serialized against
	 * concurrent hotplug operations.
	 */
	if (online) {
		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
			atomic_inc(&__num_online_cpus);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
			atomic_dec(&__num_online_cpus);
	}
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
	cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
#endif
	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}

/*
 * These are used for a global "mitigations=" cmdline option for toggling
 * optional CPU mitigations.
 */
enum cpu_mitigations {
	CPU_MITIGATIONS_OFF,
	CPU_MITIGATIONS_AUTO,
	CPU_MITIGATIONS_AUTO_NOSMT,
};

static enum cpu_mitigations cpu_mitigations __ro_after_init =
	CPU_MITIGATIONS_AUTO;

static int __init mitigations_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		cpu_mitigations = CPU_MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO;
	else if (!strcmp(arg, "auto,nosmt"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
	else
		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
			arg);

	return 0;
}
early_param("mitigations", mitigations_parse_cmdline);
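
/*
 * Usage note (documentation only): selected on the kernel command line,
 * e.g.
 *
 *	mitigations=off
 *	mitigations=auto,nosmt
 *
 * Unrecognized values keep the default (auto) and log the pr_crit()
 * warning above.
 */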

/* mitigations=off */
bool cpu_mitigations_off(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_OFF;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_off);

/* mitigations=auto,nosmt */
bool cpu_mitigations_auto_nosmt(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);