cpu/hotplug: Make target state writeable
[linux-2.6-block.git] / kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

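/*
 * Note: bringup walks st->state upwards towards st->target, invoking the
 * startup callback of every step in between, while teardown walks downwards
 * invoking the teardown callbacks; see _cpu_up() and _cpu_down() below.
 */
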
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char	*name;
	int		(*startup)(unsigned int cpu);
	int		(*teardown)(unsigned int cpu);
	bool		skip_onerr;
	bool		cant_stop;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @step:	The step in the state machine
 * @cb:		The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
				 int (*cb)(unsigned int))
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret = 0;

	if (cb) {
		trace_cpuhp_enter(cpu, st->target, step, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, step, ret);
	}
	return ret;
}

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
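/*
 * Note: cpu_notifier_register_begin/done() are aliases of the two functions
 * above (see linux/cpu.h), which is why the exports use those names.
 */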

static RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

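/*
 * Reader side usage sketch: code which must see a stable cpu_online_mask
 * brackets the region with the pair below; do_something() is just a
 * placeholder.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);
 *	put_online_cpus();
 */
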
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);
	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
	return __cpu_notify(val, cpu, -1, NULL);
}

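/*
 * Note: when tasks are frozen (suspend/resume), CPU_TASKS_FROZEN is ORed
 * into the notifier value above, so e.g. CPU_ONLINE is delivered as
 * CPU_ONLINE_FROZEN on the resume path.
 */
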
/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
	int nr_calls = 0;
	int ret;

	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
	}
	return ret;
}

static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}

static int notify_starting(unsigned int cpu)
{
	cpu_notify(CPU_STARTING, cpu);
	return 0;
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	BUG_ON(!cpu_online(cpu));
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}

static int notify_down_prepare(unsigned int cpu)
{
	int err, nr_calls = 0;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
	}
	return err;
}

static int notify_dying(unsigned int cpu)
{
	cpu_notify(CPU_DYING, cpu);
	return 0;
}

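/*
 * take_cpu_down() below is run via stop_machine() on the CPU being
 * unplugged, with interrupts disabled, so the CPU_DYING-era teardown
 * callbacks it invokes execute on the dying CPU itself.
 */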
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = cpuhp_ap_states + st->state;

		cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	int err;

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		irq_unlock_sparse();
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}

static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}

#else
#define notify_down_prepare	NULL
#define takedown_cpu		NULL
#define notify_dead		NULL
#define notify_dying		NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU
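/*
 * Note: undo_cpu_down() rolls a partially torn down CPU forward again.
 * Every step that was already torn down is restarted (except steps marked
 * skip_onerr), returning the CPU to the state it had before _cpu_down()
 * began.
 */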
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	for (; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;

	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return ret;
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	while (st->state < target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_ap_states + st->state;
		cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}

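/*
 * Note: the min() clamp above limits the walk to CPUHP_AP_ONLINE because
 * this runs on the starting CPU itself; any remaining steps up to
 * st->target are driven from the controlling CPU in _cpu_up().
 */
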
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int prev_state, ret = 0;

	cpu_hotplug_begin();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	while (st->state < st->target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_bp_states + st->state;
		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
out:
	cpu_hotplug_done();
	return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name = "offline",
		.startup = NULL,
		.teardown = NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name = "threads:create",
		.startup = smpboot_create_threads,
		.teardown = NULL,
		.cant_stop = true,
	},
	[CPUHP_NOTIFY_PREPARE] = {
		.name = "notify:prepare",
		.startup = notify_prepare,
		.teardown = notify_dead,
		.skip_onerr = true,
		.cant_stop = true,
	},
	[CPUHP_BRINGUP_CPU] = {
		.name = "cpu:bringup",
		.startup = bringup_cpu,
		.teardown = NULL,
		.cant_stop = true,
	},
	[CPUHP_TEARDOWN_CPU] = {
		.name = "cpu:teardown",
		.startup = NULL,
		.teardown = takedown_cpu,
		.cant_stop = true,
	},
	[CPUHP_NOTIFY_ONLINE] = {
		.name = "notify:online",
		.startup = notify_online,
		.teardown = notify_down_prepare,
		.cant_stop = true,
	},
#endif
	[CPUHP_ONLINE] = {
		.name = "online",
		.startup = NULL,
		.teardown = NULL,
	},
};

/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	[CPUHP_AP_NOTIFY_STARTING] = {
		.name = "notify:starting",
		.startup = notify_starting,
		.teardown = notify_dying,
		.skip_onerr = true,
		.cant_stop = true,
	},
#endif
	[CPUHP_ONLINE] = {
		.name = "online",
		.startup = NULL,
		.teardown = NULL,
	},
};

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	return (state > CPUHP_AP_OFFLINE && state < CPUHP_AP_ONLINE);
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

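/*
 * Writing a state number to the "target" attribute drives the CPU through
 * the state machine to that state. A userspace usage sketch, assuming the
 * hotplug attribute group registered below:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target	# CPUHP_OFFLINE
 *
 * Only CPUHP_OFFLINE and CPUHP_ONLINE are accepted unless
 * CONFIG_CPU_HOTPLUG_STATE_CONTROL is enabled, and steps marked cant_stop
 * are rejected in any case.
 */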
static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
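
/*
 * Example: on a 64-bit kernel, cpumask_of(3) resolves to row
 * cpu_bit_bitmap[4], whose only set bit is bit 3. For cpu >= BITS_PER_LONG
 * the lookup backs the pointer up by cpu / BITS_PER_LONG words, which is
 * why row 0 above must stay empty.
 */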

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}