cpu/hotplug: Split out the state walk into functions
kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char	*name;
	int		(*startup)(unsigned int cpu);
	int		(*teardown)(unsigned int cpu);
	bool		skip_onerr;
	bool		cant_stop;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @step:	The step in the state machine
 * @cb:		The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
				 int (*cb)(unsigned int))
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret = 0;

	if (cb) {
		trace_cpuhp_enter(cpu, st->target, step, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, step, ret);
	}
	return ret;
}

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();

}
EXPORT_SYMBOL_GPL(put_online_cpus);

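/*
 * Illustrative usage (a sketch, not part of this file; do_something() is
 * a made-up helper): readers that need a stable cpu_online_mask bracket
 * the traversal with the refcounted read side:
 *
 *	int cpu;
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);
 *	put_online_cpus();
 *
 * No CPU can be unplugged between the two calls; a writer in
 * cpu_hotplug_begin() waits until the refcount drops to zero.
 */
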
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an api which is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
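
/*
 * Illustrative pairing (a sketch, not part of this file; the helper name
 * is made up): since the disable side is counted, every caller must run
 * the enable side even on its failure path:
 *
 *	cpu_hotplug_disable();
 *	ret = do_critical_work();
 *	cpu_hotplug_enable();
 *
 * The PM callback near the bottom of this file uses exactly this pairing
 * across suspend/resume.
 */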
#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

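/*
 * Illustrative sketch (not part of this file; the foo_* names are made
 * up): a subsystem hooks the chain with a callback that switches on the
 * action code, masking off CPU_TASKS_FROZEN to treat suspend-time events
 * like runtime ones:
 *
 *	static int foo_cpu_callback(struct notifier_block *nb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			return notifier_from_errno(foo_prepare(cpu));
 *		case CPU_DEAD:
 *			foo_cleanup(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&foo_cpu_notifier);
 */
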
static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;

	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
	return __cpu_notify(val, cpu, -1, NULL);
}

/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
	int nr_calls = 0;
	int ret;

	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
	}
	return ret;
}

static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}

static int notify_starting(unsigned int cpu)
{
	cpu_notify(CPU_STARTING, cpu);
	return 0;
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	BUG_ON(!cpu_online(cpu));
	return 0;
}

/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
			  struct cpuhp_step *steps)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = steps + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				struct cpuhp_step *steps, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = steps + st->state;

		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st, steps);
			break;
		}
	}
	return ret;
}

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
			struct cpuhp_step *steps)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = steps + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      struct cpuhp_step *steps, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		struct cpuhp_step *step;

		st->state++;
		step = steps + st->state;
		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st, steps);
			break;
		}
	}
	return ret;
}

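/*
 * Worked example of the rollback (derived from the functions above):
 * suppose a bringup from CPUHP_OFFLINE towards CPUHP_ONLINE fails in the
 * startup callback of some state S. cpuhp_up_callbacks() resets
 * st->target to the previous state and undo_cpu_up() walks back down
 * from S - 1, invoking the teardown callback of every state whose
 * startup had already completed, except those marked skip_onerr. The
 * CPU thus ends up in the state it started from. cpuhp_down_callbacks()
 * and undo_cpu_down() mirror this for the teardown direction.
 */
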
#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}

static int notify_down_prepare(unsigned int cpu)
{
	int err, nr_calls = 0;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
	}
	return err;
}

static int notify_dying(unsigned int cpu)
{
	cpu_notify(CPU_DYING, cpu);
	return 0;
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = cpuhp_ap_states + st->state;

		cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	int err;

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * rcu boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		irq_unlock_sparse();
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}

static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}

#else
#define notify_down_prepare	NULL
#define takedown_cpu		NULL
#define notify_dead		NULL
#define notify_dying		NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);

	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;

	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return ret;
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}
int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	while (st->state < target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_ap_states + st->state;
		cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}

/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread.
 */
static int cpuhp_set_cpu_active(unsigned int cpu)
{
	/* The cpu is marked online, set it active now */
	set_cpu_active(cpu, true);
	/* Unpark the stopper thread */
	stop_machine_unpark(cpu);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpu_hotplug_begin();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	st->target = target;
	ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
out:
	cpu_hotplug_done();
	return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);

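/*
 * Illustrative usage (a sketch, not part of this file): both entry
 * points may sleep and return 0 or a negative errno, e.g. -EBUSY when
 * hotplug is disabled or the last online CPU would go away:
 *
 *	ret = cpu_down(3);
 *	if (!ret)
 *		ret = cpu_up(3);
 */
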
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup		= NULL,
		.teardown		= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS]	= {
		.name			= "threads:create",
		.startup		= smpboot_create_threads,
		.teardown		= NULL,
		.cant_stop		= true,
	},
	[CPUHP_NOTIFY_PREPARE] = {
		.name			= "notify:prepare",
		.startup		= notify_prepare,
		.teardown		= notify_dead,
		.skip_onerr		= true,
		.cant_stop		= true,
	},
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup		= bringup_cpu,
		.teardown		= NULL,
		.cant_stop		= true,
	},
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup		= NULL,
		.teardown		= takedown_cpu,
		.cant_stop		= true,
	},
	[CPUHP_CPU_SET_ACTIVE] = {
		.name			= "cpu:active",
		.startup		= cpuhp_set_cpu_active,
		.teardown		= NULL,
	},
	[CPUHP_SMPBOOT_THREADS] = {
		.name			= "smpboot:threads",
		.startup		= smpboot_unpark_threads,
		.teardown		= smpboot_park_threads,
	},
	[CPUHP_NOTIFY_ONLINE] = {
		.name			= "notify:online",
		.startup		= notify_online,
		.teardown		= notify_down_prepare,
		.cant_stop		= true,
	},
#endif
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup		= NULL,
		.teardown		= NULL,
	},
};

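/*
 * Illustrative walk (derived from the table above): a full bringup via
 * cpuhp_up_callbacks() invokes the startup callbacks in array order,
 * CPUHP_CREATE_THREADS -> CPUHP_NOTIFY_PREPARE -> CPUHP_BRINGUP_CPU ->
 * ... -> CPUHP_NOTIFY_ONLINE, while a teardown runs the teardown
 * callbacks in reverse order. States with cant_stop set cannot be used
 * as an intermediate target via the sysfs interface below.
 */
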
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	[CPUHP_AP_NOTIFY_STARTING] = {
		.name			= "notify:starting",
		.startup		= notify_starting,
		.teardown		= notify_dying,
		.skip_onerr		= true,
		.cant_stop		= true,
	},
#endif
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup		= NULL,
		.teardown		= NULL,
	},
};

/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	return (state > CPUHP_AP_OFFLINE && state < CPUHP_AP_ONLINE);
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}

static void cpuhp_store_callbacks(enum cpuhp_state state,
				  const char *name,
				  int (*startup)(unsigned int cpu),
				  int (*teardown)(unsigned int cpu))
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(state);
	sp->startup = startup;
	sp->teardown = teardown;
	sp->name = name;
	mutex_unlock(&cpuhp_state_mutex);
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown;
}

/* Helper function to run callback on the target cpu */
static void cpuhp_on_cpu_cb(void *__cb)
{
	int (*cb)(unsigned int cpu) = __cb;

	BUG_ON(cb(smp_processor_id()));
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
			    int (*cb)(unsigned int), bool bringup)
{
	int ret;

	if (!cb)
		return 0;

	/*
	 * This invokes the callback directly for now. In a later step we
	 * convert that to use cpuhp_invoke_callback().
	 */
	if (cpuhp_is_ap_state(state)) {
		/*
		 * Note, that a function called on the AP is not
		 * allowed to fail.
		 */
		if (cpu_online(cpu))
			smp_call_function_single(cpu, cpuhp_on_cpu_cb, cb, 1);
		return 0;
	}

	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
	ret = cb(cpu);
	BUG_ON(ret && !bringup);
	return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   int (*teardown)(unsigned int cpu))
{
	int cpu;

	if (!teardown)
		return;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, teardown, false);
	}
}

/*
 * Returns a free slot for dynamic state assignment in the ONLINE range. The
 * states are protected by the cpuhp_state_mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_ONLINE_DYN; i <= CPUHP_ONLINE_DYN_END; i++) {
		if (cpuhp_bp_states[i].name)
			continue;

		cpuhp_bp_states[i].name = "Reserved";
		mutex_unlock(&cpuhp_state_mutex);
		return i;
	}
	mutex_unlock(&cpuhp_state_mutex);
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}

/**
 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
 * @state:	The state to setup
 * @name:	Name of the state
 * @invoke:	If true, the startup function is invoked for cpus where
 *		cpu state >= @state
 * @startup:	startup callback function
 * @teardown:	teardown callback function
 *
 * Returns 0 if successful, otherwise a proper error code
 */
int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu))
{
	int cpu, ret = 0;
	int dyn_state = 0;

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	get_online_cpus();

	/* currently assignments for the ONLINE state are possible */
	if (state == CPUHP_ONLINE_DYN) {
		dyn_state = 1;
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			goto out;
		state = ret;
	}

	cpuhp_store_callbacks(state, name, startup, teardown);

	if (!invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, startup, true);
		if (ret) {
			cpuhp_rollback_install(cpu, state, teardown);
			cpuhp_store_callbacks(state, NULL, NULL, NULL);
			goto out;
		}
	}
out:
	put_online_cpus();
	if (!ret && dyn_state)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);

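/*
 * Illustrative sketch (not part of this file; the foo_* names are made
 * up): a driver installs a dynamic state and keeps the returned state
 * number in the CPUHP_ONLINE_DYN range for later removal:
 *
 *	int foo_state;
 *
 *	foo_state = __cpuhp_setup_state(CPUHP_ONLINE_DYN, "foo:online",
 *					true, foo_online, foo_offline);
 *	if (foo_state < 0)
 *		return foo_state;
 *	...
 *	__cpuhp_remove_state(foo_state, true);
 */
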
/**
 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	get_online_cpus();

	if (!invoke || !teardown)
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, teardown, false);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL);
	put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);

	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

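/*
 * Illustrative userspace view (assuming the attribute groups below get
 * registered): /sys/devices/system/cpu/hotplug/states lists the named
 * states with their numbers, and writing a state number to
 * /sys/devices/system/cpu/cpuN/hotplug/target drives cpuN up or down to
 * that state. Without CONFIG_CPU_HOTPLUG_STATE_CONTROL only the
 * CPUHP_OFFLINE and CPUHP_ONLINE numbers are accepted.
 */
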
static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
	NULL
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
	NULL
};

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

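/*
 * Worked example (illustrative; the indexing lives in cpumask_of() in
 * <linux/cpumask.h>): for cpu 5 on a 64-bit build, cpumask_of(5) points
 * at row [1 + 5 % BITS_PER_LONG] backed off by 5 / BITS_PER_LONG longs,
 * so the word covering bit 5 reads 1UL << 5 and all other words read 0.
 */
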
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}