/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @fail:	State at which a forced callback failure is injected,
 *		CPUHP_INVALID if unused
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @node:	For multi-instance, the instance to do a single entry
 *		callback for (install/remove)
 * @last:	For multi-instance rollback, remember how far we got
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);


static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

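/*
 * Note (added commentary): the two lockdep maps above do not protect any
 * data themselves. They are taken around the callback invocations in
 * cpuhp_thread_fun() and acquired/released back to back on the issuing
 * side (see cpuhp_invoke_ap_callback() and cpuhp_kick_ap_work() below),
 * so that lockdep sees a dependency between the CPU waiting for the
 * hotplug thread and the locks taken inside the callbacks, and can
 * report deadlocks across the thread handoff.
 */
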
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 * @multi_instance: State is set up for multiple instances which get
 *		added afterwards
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail.
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}

#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	complete(done);
}

/*
 * The former STARTING/DYING states run with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

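/*
 * Usage sketch (illustrative, not part of this file): readers that must
 * keep the set of online CPUs stable wrap their traversal in the read
 * side of the hotplug lock. my_count_online() is a made-up example
 * function.
 *
 *	static unsigned int my_count_online(void)
 *	{
 *		unsigned int cpu, cnt = 0;
 *
 *		cpus_read_lock();
 *		for_each_online_cpu(cpu)
 *			cnt++;
 *		cpus_read_unlock();
 *		return cnt;
 *	}
 */
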
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */

static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

/*
 * Hotplug state machine related functions
 */

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
	}
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	if (WARN_ON_ONCE(!st->should_run))
		return;

	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (st->rollback) {
		struct cpuhp_step *step = cpuhp_get_step(state);
		if (step->skip_onerr)
			goto next;
	}

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

next:
	cpuhp_lock_release(bringup);

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	return ret;
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);

#else
#define takedown_cpu		NULL
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		/*
		 * STARTING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
}

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_hp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_PREPARE] = {
		.name			= "timers:dead",
		.startup.single		= timers_prepare_cpu,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/*
	 * Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/*
	 * Handled on the control processor until the plugged processor
	 * manages this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};

/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

/*
 * Returns a free slot for dynamic state assignment from the Online or
 * Prepare dynamic range. The states are protected by the
 * cpuhp_state_mutex and an empty slot is identified by having no name
 * assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}

static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

1455static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1456{
3c1627e9 1457 return cpuhp_get_step(state)->teardown.single;
5b7aa87e
TG
1458}
1459
5b7aa87e
TG
1460/*
1461 * Call the startup/teardown function for a step either on the AP or
1462 * on the current CPU.
1463 */
cf392d10
TG
1464static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1465 struct hlist_node *node)
5b7aa87e 1466{
a724632c 1467 struct cpuhp_step *sp = cpuhp_get_step(state);
5b7aa87e
TG
1468 int ret;
1469
4dddfb5f
PZ
1470 /*
1471 * If there's nothing to do, we done.
1472 * Relies on the union for multi_instance.
1473 */
3c1627e9
TG
1474 if ((bringup && !sp->startup.single) ||
1475 (!bringup && !sp->teardown.single))
5b7aa87e 1476 return 0;
5b7aa87e
TG
1477 /*
1478 * The non AP bound callbacks can fail on bringup. On teardown
1479 * e.g. module removal we crash for now.
1480 */
1cf4f629
TG
1481#ifdef CONFIG_SMP
1482 if (cpuhp_is_ap_state(state))
cf392d10 1483 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1cf4f629 1484 else
96abb968 1485 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1cf4f629 1486#else
96abb968 1487 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1cf4f629 1488#endif
5b7aa87e
TG
1489 BUG_ON(ret && !bringup);
1490 return ret;
1491}
1492
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}

int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);

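/*
 * Multi-instance usage sketch (illustrative, not part of this file):
 * a driver embeds a hlist_node per object, registers the state once via
 * the cpuhp_setup_state_multi() wrapper (whose callbacks take an extra
 * struct hlist_node * argument) and then adds/removes instances.
 * struct my_ctx, my_online() and my_prepare_down() are made-up names.
 *
 *	struct my_ctx {
 *		struct hlist_node node;
 *	};
 *
 *	hp_online = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *					    "mydrv:online",
 *					    my_online, my_prepare_down);
 *	...
 *	cpuhp_state_add_instance(hp_online, &ctx->node);
 *	...
 *	cpuhp_state_remove_instance(hp_online, &ctx->node);
 */
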
/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the state
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);

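/*
 * Usage sketch (illustrative, not part of this file): most callers go
 * through the cpuhp_setup_state() wrapper with CPUHP_AP_ONLINE_DYN and
 * store the dynamically allocated state number for later removal.
 * my_cpu_online() and my_cpu_down_prep() are made-up callbacks of type
 * int (*)(unsigned int cpu).
 *
 *	static enum cpuhp_state hp_online;
 *
 *	static int __init mydrv_init(void)
 *	{
 *		int ret;
 *
 *		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
 *					my_cpu_online, my_cpu_down_prep);
 *		if (ret < 0)
 *			return ret;
 *		hp_online = ret;
 *		return 0;
 *	}
 *
 *	static void __exit mydrv_exit(void)
 *	{
 *		cpuhp_remove_state(hp_online);
 *	}
 */
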
int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);

/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

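/*
 * Sysfs usage sketch (illustrative): the numeric state values can be
 * listed via the global "states" file (see show_cpuhp_states() below)
 * and a CPU can be driven to one of them by writing it into "target":
 *
 *	# cat /sys/devices/system/cpu/hotplug/states
 *	# echo <state> > /sys/devices/system/cpu/cpu1/hotplug/target
 *
 * Without CONFIG_CPU_HOTPLUG_STATE_CONTROL only CPUHP_OFFLINE and
 * CPUHP_ONLINE are accepted, i.e. this degenerates to a plain
 * offline/online operation.
 */
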
static ssize_t write_cpuhp_fail(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}

static ssize_t show_cpuhp_fail(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);

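/*
 * Fault injection sketch (illustrative): writing a state number into
 * "fail" makes the next hotplug operation report -EAGAIN when it
 * reaches that state (see the st->fail check in
 * cpuhp_invoke_callback()), which exercises the rollback paths, e.g.:
 *
 *	# echo <state> > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	# echo 0 > /sys/devices/system/cpu/cpu1/online
 */
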
static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
	NULL
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
	NULL
};

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

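/*
 * Worked example (added commentary): get_cpu_mask() in
 * <linux/cpumask.h>, which backs cpumask_of(), computes
 *
 *	p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *
 * For cpu = 5 it points at the row whose word 0 has bit 5 set. For
 * cpu = 69 on a 64-bit kernel it points one word *before* the same
 * row, so bit 5 of word 1 of the returned mask - i.e. bit 69 overall -
 * is the one that is set. The empty row 0 provides the slack that this
 * negative indexing backs into.
 */
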
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}