cpu/hotplug: Let upcoming cpu bring itself fully up
kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:      The current cpu state
 * @target:     The target state
 * @thread:     Pointer to the hotplug thread
 * @should_run: Thread should execute
 * @cb_state:   The state for a single callback (install/uninstall)
 * @cb:         Single callback function (install/uninstall)
 * @result:     Result of the operation
 * @done:       Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
        enum cpuhp_state        state;
        enum cpuhp_state        target;
#ifdef CONFIG_SMP
        struct task_struct      *thread;
        bool                    should_run;
        enum cpuhp_state        cb_state;
        int                     (*cb)(unsigned int cpu);
        int                     result;
        struct completion       done;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

/**
 * cpuhp_step - Hotplug state machine step
 * @name:       Name of the step
 * @startup:    Startup function of the step
 * @teardown:   Teardown function of the step
 * @skip_onerr: Do not invoke the functions on error rollback
 *              Will go away once the notifiers are gone
 * @cant_stop:  Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
        const char      *name;
        int             (*startup)(unsigned int cpu);
        int             (*teardown)(unsigned int cpu);
        bool            skip_onerr;
        bool            cant_stop;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:        The cpu for which the callback should be invoked
 * @step:       The step in the state machine
 * @cb:         The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
                                 int (*cb)(unsigned int))
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int ret = 0;

        if (cb) {
                trace_cpuhp_enter(cpu, st->target, step, cb);
                ret = cb(cpu);
                trace_cpuhp_exit(cpu, st->state, step, ret);
        }
        return ret;
}

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 *
 * Note: cpu_notifier_register_begin/done() are #defined as aliases of
 * cpu_maps_update_begin/done() in <linux/cpu.h>, which is why the exports
 * below name the alias rather than the function itself.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        /* wait queue to wake up the active_writer */
        wait_queue_head_t wq;
        /* verifies that no writer will get active while readers are active */
        struct mutex lock;
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
        .active_writer = NULL,
        .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
                                  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)


void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
        atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        int refcount;

        if (cpu_hotplug.active_writer == current)
                return;

        refcount = atomic_dec_return(&cpu_hotplug.refcount);
        if (WARN_ON(refcount < 0)) /* try to fix things up */
                atomic_inc(&cpu_hotplug.refcount);

        if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
                wake_up(&cpu_hotplug.wq);

        cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
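
/*
 * Illustrative sketch (not part of this file): typical reader-side use of
 * get/put_online_cpus() to keep the online mask stable across an iteration.
 * The function name below is made up for the example.
 *
 *	static unsigned int example_count_online(void)
 *	{
 *		unsigned int cpu, cnt = 0;
 *
 *		get_online_cpus();		// hold off hotplug writers
 *		for_each_online_cpu(cpu)	// mask cannot change here
 *			cnt++;
 *		put_online_cpus();		// allow hotplug again
 *		return cnt;
 *	}
 */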

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
        DEFINE_WAIT(wait);

        cpu_hotplug.active_writer = current;
        cpuhp_lock_acquire();

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (likely(!atomic_read(&cpu_hotplug.refcount)))
                        break;
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
        finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled++;
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        WARN_ON(--cpu_hotplug_disabled < 0);
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
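
/*
 * Illustrative sketch (not part of this file): code that must not race with
 * hotplug at all can pair cpu_hotplug_disable() with cpu_hotplug_enable();
 * while disabled, cpu_up()/cpu_down() return -EBUSY. The function name is
 * made up for the example.
 *
 *	static void example_no_hotplug_window(void)
 *	{
 *		cpu_hotplug_disable();	// cpu_up()/cpu_down() now fail
 *		// ... work that must see a fixed CPU topology ...
 *		cpu_hotplug_enable();	// re-allow hotplug
 *	}
 */
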
#endif  /* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cpu_chain, nb);
}
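
/*
 * Illustrative sketch (not part of this file): a classic hotplug notifier of
 * the kind these registration helpers serve; the state machine above is
 * gradually replacing this pattern. All "example_*" names are made up.
 *
 *	static int example_cpu_callback(struct notifier_block *nb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			// set up per-cpu resources for @cpu
 *			break;
 *		case CPU_DEAD:
 *			// tear them down again
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_cpu_notifier = {
 *		.notifier_call = example_cpu_callback,
 *	};
 *
 *	// registered via register_cpu_notifier(&example_cpu_notifier);
 */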

static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
                        int *nr_calls)
{
        unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
        void *hcpu = (void *)(long)cpu;
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
        return __cpu_notify(val, cpu, -1, NULL);
}

/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
        int nr_calls = 0;
        int ret;

        ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                pr_warn("%s: attempt to bring up CPU %u failed\n",
                        __func__, cpu);
                __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
        }
        return ret;
}

static int notify_online(unsigned int cpu)
{
        cpu_notify(CPU_ONLINE, cpu);
        return 0;
}

static int notify_starting(unsigned int cpu)
{
        cpu_notify(CPU_STARTING, cpu);
        return 0;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        /* Wait until the AP has completed its part of the bringup */
        wait_for_completion(&st->done);
        return st->result;
}

static int bringup_cpu(unsigned int cpu)
{
        struct task_struct *idle = idle_thread_get(cpu);
        int ret;

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret) {
                cpu_notify(CPU_UP_CANCELED, cpu);
                return ret;
        }
        ret = bringup_wait_for_ap(cpu);
        BUG_ON(!cpu_online(cpu));
        return ret;
}

/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
                          struct cpuhp_step *steps)
{
        for (st->state++; st->state < st->target; st->state++) {
                struct cpuhp_step *step = steps + st->state;

                if (!step->skip_onerr)
                        cpuhp_invoke_callback(cpu, st->state, step->startup);
        }
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                                struct cpuhp_step *steps, enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        int ret = 0;

        for (; st->state > target; st->state--) {
                struct cpuhp_step *step = steps + st->state;

                ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
                if (ret) {
                        st->target = prev_state;
                        undo_cpu_down(cpu, st, steps);
                        break;
                }
        }
        return ret;
}

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
                        struct cpuhp_step *steps)
{
        for (st->state--; st->state > st->target; st->state--) {
                struct cpuhp_step *step = steps + st->state;

                if (!step->skip_onerr)
                        cpuhp_invoke_callback(cpu, st->state, step->teardown);
        }
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                              struct cpuhp_step *steps, enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        int ret = 0;

        while (st->state < target) {
                struct cpuhp_step *step;

                st->state++;
                step = steps + st->state;
                ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
                if (ret) {
                        st->target = prev_state;
                        undo_cpu_up(cpu, st, steps);
                        break;
                }
        }
        return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        init_completion(&st->done);
}

static int cpuhp_should_run(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        return st->should_run;
}

/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
        enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);

        return cpuhp_down_callbacks(cpu, st, cpuhp_ap_states, target);
}

/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
        return cpuhp_up_callbacks(cpu, st, cpuhp_ap_states, st->target);
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        int ret = 0;

        /*
         * Paired with the mb() in cpuhp_kick_ap_work and
         * cpuhp_invoke_ap_callback, so the work set is consistently visible.
         */
        smp_mb();
        if (!st->should_run)
                return;

        st->should_run = false;

        /* Single callback invocation for [un]install ? */
        if (st->cb) {
                if (st->cb_state < CPUHP_AP_ONLINE) {
                        local_irq_disable();
                        ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
                        local_irq_enable();
                } else {
                        ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
                }
        } else {
                /* Cannot happen .... */
                BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

                /* Regular hotplug work */
                if (st->state < st->target)
                        ret = cpuhp_ap_online(cpu, st);
                else if (st->state > st->target)
                        ret = cpuhp_ap_offline(cpu, st);
        }
        st->result = ret;
        complete(&st->done);
}

/* Invoke a single callback on a remote cpu */
static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
                                    int (*cb)(unsigned int))
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        if (!cpu_online(cpu))
                return 0;

        st->cb_state = state;
        st->cb = cb;
        /*
         * Make sure the above stores are visible before should_run becomes
         * true. Paired with the mb() above in cpuhp_thread_fun()
         */
        smp_mb();
        st->should_run = true;
        wake_up_process(st->thread);
        wait_for_completion(&st->done);
        return st->result;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
        st->result = 0;
        st->cb = NULL;
        /*
         * Make sure the above stores are visible before should_run becomes
         * true. Paired with the mb() above in cpuhp_thread_fun()
         */
        smp_mb();
        st->should_run = true;
        wake_up_process(st->thread);
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state state = st->state;

        trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
        __cpuhp_kick_ap_work(st);
        wait_for_completion(&st->done);
        trace_cpuhp_exit(cpu, st->state, state, st->result);
        return st->result;
}

static struct smp_hotplug_thread cpuhp_threads = {
        .store                  = &cpuhp_state.thread,
        .create                 = &cpuhp_create,
        .thread_should_run      = cpuhp_should_run,
        .thread_fn              = cpuhp_thread_fun,
        .thread_comm            = "cpuhp/%u",
        .selfparking            = true,
};

void __init cpuhp_threads_init(void)
{
        BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
        kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
        raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
        struct task_struct *g, *p;

        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
                 * We do the check with unlocked task_rq(p)->lock.
                 * Order the reading so that we do not warn about a task
                 * which was running on this cpu in the past and has just
                 * been woken on another cpu.
                 */
                rmb();
                if (task_cpu(p) != dead_cpu)
                        continue;

                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
        }
        read_unlock(&tasklist_lock);
}

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
        BUG_ON(cpu_notify(val, cpu));
}

static int notify_down_prepare(unsigned int cpu)
{
        int err, nr_calls = 0;

        err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
                pr_warn("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
        }
        return err;
}

static int notify_dying(unsigned int cpu)
{
        cpu_notify(CPU_DYING, cpu);
        return 0;
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
        int err, cpu = smp_processor_id();

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /* Invoke the former CPU_DYING callbacks */
        for (; st->state > target; st->state--) {
                struct cpuhp_step *step = cpuhp_ap_states + st->state;

                cpuhp_invoke_callback(cpu, st->state, step->teardown);
        }
        /* Give up timekeeping duties */
        tick_handover_do_timer();
        /* Park the stopper thread */
        stop_machine_park(cpu);
        return 0;
}

static int takedown_cpu(unsigned int cpu)
{
        int err;

        /*
         * By now we've cleared cpu_active_mask, wait for all preempt-disabled
         * and RCU users of this state to go away such that all new such users
         * will observe it.
         *
         * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
         * not imply sync_sched(), so wait for both.
         *
         * Do the sync before parking the smpboot threads to take care of the
         * RCU boost case.
         */
        if (IS_ENABLED(CONFIG_PREEMPT))
                synchronize_rcu_mult(call_rcu, call_rcu_sched);
        else
                synchronize_rcu();

        /* Park the hotplug thread */
        kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

        /*
         * Prevent irq alloc/free while the dying cpu reorganizes the
         * interrupt affinities.
         */
        irq_lock_sparse();

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */
        err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
                irq_unlock_sparse();
                return err;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!per_cpu(cpu_dead_idle, cpu))
                cpu_relax();
        smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
        per_cpu(cpu_dead_idle, cpu) = false;

        /* Interrupts are moved away from the dying cpu, reenable alloc/free */
        irq_unlock_sparse();

        hotplug_cpu__broadcast_tick_pull(cpu);
        /* This actually kills the CPU. */
        __cpu_die(cpu);

        tick_cleanup_dead_cpu(cpu);
        return 0;
}

static int notify_dead(unsigned int cpu)
{
        cpu_notify_nofail(CPU_DEAD, cpu);
        check_for_tasks(cpu);
        return 0;
}

#else
#define notify_down_prepare     NULL
#define takedown_cpu            NULL
#define notify_dead             NULL
#define notify_dying            NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
                           enum cpuhp_state target)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int prev_state, ret = 0;
        bool hasdied = false;

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        cpuhp_tasks_frozen = tasks_frozen;

        prev_state = st->state;
        st->target = target;
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread.
         */
        if (st->state > CPUHP_TEARDOWN_CPU) {
                ret = cpuhp_kick_ap_work(cpu);
                /*
                 * The AP side has done the error rollback already. Just
                 * return the error code.
                 */
                if (ret)
                        goto out;

                /*
                 * We might have stopped still in the range of the AP hotplug
                 * thread. Nothing to do anymore.
                 */
                if (st->state > CPUHP_TEARDOWN_CPU)
                        goto out;
        }
        /*
         * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);

        hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
        cpu_hotplug_done();
        /* This post dead nonsense must die */
        if (!ret && hasdied)
                cpu_notify_nofail(CPU_POST_DEAD, cpu);
        return ret;
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0, target);

out:
        cpu_maps_update_done();
        return err;
}

int cpu_down(unsigned int cpu)
{
        return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

        while (st->state < target) {
                struct cpuhp_step *step;

                st->state++;
                step = cpuhp_ap_states + st->state;
                cpuhp_invoke_callback(cpu, st->state, step->startup);
        }
}
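
/*
 * Illustrative sketch (not part of this file): the rough shape of an arch
 * secondary-CPU entry path around notify_cpu_starting(), loosely modeled on
 * x86's start_secondary(); details vary per architecture.
 *
 *	// on the new CPU, with interrupts still disabled:
 *	notify_cpu_starting(cpu);		// former CPU_STARTING work
 *	set_cpu_online(cpu, true);		// visible to the boot CPU
 *	local_irq_enable();
 *	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 */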

/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread and unpark the smpboot threads. If the target state is
 * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
 * cpu further.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        unsigned int cpu = smp_processor_id();

        /* Happens for the boot cpu */
        if (state != CPUHP_AP_ONLINE_IDLE)
                return;

        st->state = CPUHP_AP_ONLINE_IDLE;

        /* The cpu is marked online, set it active now */
        set_cpu_active(cpu, true);
        /* Unpark the stopper thread and the hotplug thread of this cpu */
        stop_machine_unpark(cpu);
        kthread_unpark(st->thread);

        /* Should we go further up ? */
        if (st->target > CPUHP_AP_ONLINE_IDLE)
                __cpuhp_kick_ap_work(st);
        else
                complete(&st->done);
}
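
/*
 * Sketch of the call site (see kernel/sched/idle.c as of this series): the
 * new CPU finishes its own bringup from its idle task, which is what the
 * patch title "Let upcoming cpu bring itself fully up" refers to.
 *
 *	void cpu_startup_entry(enum cpuhp_state state)
 *	{
 *		...
 *		cpuhp_online_idle(state);	// kicks the AP hotplug thread
 *		cpu_idle_loop();		// never returns
 *	}
 */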

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        struct task_struct *idle;
        int ret = 0;

        cpu_hotplug_begin();

        if (!cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * The caller of do_cpu_up might have raced with another
         * caller. Ignore it for now.
         */
        if (st->state >= target)
                goto out;

        if (st->state == CPUHP_OFFLINE) {
                /* Let it fail before we try to bring the cpu up */
                idle = idle_thread_get(cpu);
                if (IS_ERR(idle)) {
                        ret = PTR_ERR(idle);
                        goto out;
                }
        }

        cpuhp_tasks_frozen = tasks_frozen;

        st->target = target;
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread once more.
         */
        if (st->state > CPUHP_BRINGUP_CPU) {
                ret = cpuhp_kick_ap_work(cpu);
                /*
                 * The AP side has done the error rollback already. Just
                 * return the error code.
                 */
                if (ret)
                        goto out;
        }

        /*
         * Try to reach the target state. We max out on the BP at
         * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
         * responsible for bringing it up to the target state.
         */
        target = min((int)target, CPUHP_BRINGUP_CPU);
        ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
out:
        cpu_hotplug_done();
        return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0, target);
out:
        cpu_maps_update_done();
        return err;
}

int cpu_up(unsigned int cpu)
{
        return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
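
/*
 * Illustrative sketch (not part of this file): onlining a CPU from kernel
 * code; the CPU number is made up for the example.
 *
 *	int err = cpu_up(3);		// drive CPU 3 to CPUHP_ONLINE
 *	if (err)
 *		pr_err("failed to online CPU3: %d\n", err);
 */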

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error) {
                        cpumask_set_cpu(cpu, frozen_cpus);
                } else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error)
                BUG_ON(num_online_cpus() > 1);
        else
                pr_err("Non-boot CPUs are not disabled\n");

        /*
         * Make sure the CPUs won't be enabled by someone else. We need to do
         * this even in case of failure as all disable_nonboot_cpus() users are
         * supposed to do enable_nonboot_cpus() on the failure path.
         */
        cpu_hotplug_disabled++;

        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        WARN_ON(--cpu_hotplug_disabled < 0);
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1, CPUHP_ONLINE);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than x86
         * bsp_pm_callback which depends on cpu_hotplug_pm_callback
         * to disable cpu hotplug to avoid cpu hotplug race.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
        [CPUHP_OFFLINE] = {
                .name                   = "offline",
                .startup                = NULL,
                .teardown               = NULL,
        },
#ifdef CONFIG_SMP
        [CPUHP_CREATE_THREADS] = {
                .name                   = "threads:create",
                .startup                = smpboot_create_threads,
                .teardown               = NULL,
                .cant_stop              = true,
        },
        [CPUHP_NOTIFY_PREPARE] = {
                .name                   = "notify:prepare",
                .startup                = notify_prepare,
                .teardown               = notify_dead,
                .skip_onerr             = true,
                .cant_stop              = true,
        },
        [CPUHP_BRINGUP_CPU] = {
                .name                   = "cpu:bringup",
                .startup                = bringup_cpu,
                .teardown               = NULL,
                .cant_stop              = true,
        },
        [CPUHP_TEARDOWN_CPU] = {
                .name                   = "cpu:teardown",
                .startup                = NULL,
                .teardown               = takedown_cpu,
                .cant_stop              = true,
        },
#endif
};

/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
        [CPUHP_AP_NOTIFY_STARTING] = {
                .name                   = "notify:starting",
                .startup                = notify_starting,
                .teardown               = notify_dying,
                .skip_onerr             = true,
                .cant_stop              = true,
        },
        [CPUHP_AP_SMPBOOT_THREADS] = {
                .name                   = "smpboot:threads",
                .startup                = smpboot_unpark_threads,
                .teardown               = smpboot_park_threads,
        },
        [CPUHP_AP_NOTIFY_ONLINE] = {
                .name                   = "notify:online",
                .startup                = notify_online,
                .teardown               = notify_down_prepare,
        },
#endif
        [CPUHP_ONLINE] = {
                .name                   = "online",
                .startup                = NULL,
                .teardown               = NULL,
        },
};

/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
        if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
                return -EINVAL;
        return 0;
}

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
        if (state >= CPUHP_AP_OFFLINE && state <= CPUHP_AP_ONLINE)
                return true;
        return state > CPUHP_BRINGUP_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
        struct cpuhp_step *sp;

        sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
        return sp + state;
}

static void cpuhp_store_callbacks(enum cpuhp_state state,
                                  const char *name,
                                  int (*startup)(unsigned int cpu),
                                  int (*teardown)(unsigned int cpu))
{
        /* (Un)Install the callbacks for further cpu hotplug operations */
        struct cpuhp_step *sp;

        mutex_lock(&cpuhp_state_mutex);
        sp = cpuhp_get_step(state);
        sp->startup = startup;
        sp->teardown = teardown;
        sp->name = name;
        mutex_unlock(&cpuhp_state_mutex);
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
        return cpuhp_get_step(state)->teardown;
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
                            int (*cb)(unsigned int), bool bringup)
{
        int ret;

        if (!cb)
                return 0;
        /*
         * The non AP bound callbacks can fail on bringup. On teardown
         * e.g. module removal we crash for now.
         */
#ifdef CONFIG_SMP
        if (cpuhp_is_ap_state(state))
                ret = cpuhp_invoke_ap_callback(cpu, state, cb);
        else
                ret = cpuhp_invoke_callback(cpu, state, cb);
#else
        ret = cpuhp_invoke_callback(cpu, state, cb);
#endif
        BUG_ON(ret && !bringup);
        return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
                                   int (*teardown)(unsigned int cpu))
{
        int cpu;

        if (!teardown)
                return;

        /* Roll back the already executed steps on the other cpus */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpu >= failedcpu)
                        break;

                /* Did we invoke the startup call on that cpu ? */
                if (cpustate >= state)
                        cpuhp_issue_call(cpu, state, teardown, false);
        }
}

/*
 * Returns a free slot for dynamic assignment in the online state range. The
 * states are protected by the cpuhp_state_mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
        enum cpuhp_state i;

        mutex_lock(&cpuhp_state_mutex);
        for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
                if (cpuhp_ap_states[i].name)
                        continue;

                cpuhp_ap_states[i].name = "Reserved";
                mutex_unlock(&cpuhp_state_mutex);
                return i;
        }
        mutex_unlock(&cpuhp_state_mutex);
        WARN(1, "No more dynamic states available for CPU hotplug\n");
        return -ENOSPC;
}

/**
 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
 * @state:      The state to setup
 * @name:       Name of the state
 * @invoke:     If true, the startup function is invoked for cpus where
 *              cpu state >= @state
 * @startup:    startup callback function
 * @teardown:   teardown callback function
 *
 * Returns 0 if successful (or the dynamically allocated state for
 * CPUHP_AP_ONLINE_DYN), otherwise a negative error code.
 */
int __cpuhp_setup_state(enum cpuhp_state state,
                        const char *name, bool invoke,
                        int (*startup)(unsigned int cpu),
                        int (*teardown)(unsigned int cpu))
{
        int cpu, ret = 0;
        int dyn_state = 0;

        if (cpuhp_cb_check(state) || !name)
                return -EINVAL;

        get_online_cpus();

        /* currently, dynamic slot assignment is only possible for online states */
        if (state == CPUHP_AP_ONLINE_DYN) {
                dyn_state = 1;
                ret = cpuhp_reserve_state(state);
                if (ret < 0)
                        goto out;
                state = ret;
        }

        cpuhp_store_callbacks(state, name, startup, teardown);

        if (!invoke || !startup)
                goto out;

        /*
         * Try to call the startup callback for each present cpu
         * depending on the hotplug state of the cpu.
         */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpustate < state)
                        continue;

                ret = cpuhp_issue_call(cpu, state, startup, true);
                if (ret) {
                        cpuhp_rollback_install(cpu, state, teardown);
                        cpuhp_store_callbacks(state, NULL, NULL, NULL);
                        goto out;
                }
        }
out:
        put_online_cpus();
        if (!ret && dyn_state)
                return state;
        return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
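
/*
 * Illustrative sketch (not part of this file): a module installing a dynamic
 * online state. With CPUHP_AP_ONLINE_DYN the return value is the actually
 * allocated state, which must be kept for later removal. All "example_*"
 * names are made up.
 *
 *	static enum cpuhp_state example_state;
 *
 *	static int example_online(unsigned int cpu)
 *	{
 *		return 0;	// set up per-cpu resources; may fail
 *	}
 *
 *	static int example_offline(unsigned int cpu)
 *	{
 *		return 0;	// teardown; must not fail
 *	}
 *
 *	static int __init example_init(void)
 *	{
 *		int ret;
 *
 *		ret = __cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 *					  "example:online", true,
 *					  example_online, example_offline);
 *		if (ret < 0)
 *			return ret;
 *		example_state = ret;
 *		return 0;
 *	}
 */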

/**
 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
 * @state:      The state to remove
 * @invoke:     If true, the teardown function is invoked for cpus where
 *              cpu state >= @state
 *
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
        int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
        int cpu;

        BUG_ON(cpuhp_cb_check(state));

        get_online_cpus();

        if (!invoke || !teardown)
                goto remove;

        /*
         * Call the teardown callback for each present cpu depending
         * on the hotplug state of the cpu. This function is not
         * allowed to fail currently!
         */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpustate >= state)
                        cpuhp_issue_call(cpu, state, teardown, false);
        }
remove:
        cpuhp_store_callbacks(state, NULL, NULL, NULL);
        put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
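
/*
 * Illustrative sketch (not part of this file), continuing the example above:
 * on module unload the dynamic state is removed again and, with invoke=true,
 * the teardown callback runs on every CPU that reached the state.
 *
 *	static void __exit example_exit(void)
 *	{
 *		__cpuhp_remove_state(example_state, true);
 *	}
 */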

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

        return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
        struct cpuhp_step *sp;
        int target, ret;

        ret = kstrtoint(buf, 10, &target);
        if (ret)
                return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
        if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
                return -EINVAL;
#else
        if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
                return -EINVAL;
#endif

        ret = lock_device_hotplug_sysfs();
        if (ret)
                return ret;

        mutex_lock(&cpuhp_state_mutex);
        sp = cpuhp_get_step(target);
        ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
        mutex_unlock(&cpuhp_state_mutex);
        if (ret)
                return ret;

        if (st->state < target)
                ret = do_cpu_up(dev->id, target);
        else
                ret = do_cpu_down(dev->id, target);

        unlock_device_hotplug();
        return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

        return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static struct attribute *cpuhp_cpu_attrs[] = {
        &dev_attr_state.attr,
        &dev_attr_target.attr,
        NULL
};

static struct attribute_group cpuhp_cpu_attr_group = {
        .attrs = cpuhp_cpu_attrs,
        .name = "hotplug",
};

static ssize_t show_cpuhp_states(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        ssize_t cur, res = 0;
        int i;

        mutex_lock(&cpuhp_state_mutex);
        for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
                struct cpuhp_step *sp = cpuhp_get_step(i);

                if (sp->name) {
                        cur = sprintf(buf, "%3d: %s\n", i, sp->name);
                        buf += cur;
                        res += cur;
                }
        }
        mutex_unlock(&cpuhp_state_mutex);
        return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
        &dev_attr_states.attr,
        NULL
};

static struct attribute_group cpuhp_cpu_root_attr_group = {
        .attrs = cpuhp_cpu_root_attrs,
        .name = "hotplug",
};

static int __init cpuhp_sysfs_init(void)
{
        int cpu, ret;

        ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
                                 &cpuhp_cpu_root_attr_group);
        if (ret)
                return ret;

        for_each_possible_cpu(cpu) {
                struct device *dev = get_cpu_device(cpu);

                if (!dev)
                        continue;
                ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
                if (ret)
                        return ret;
        }
        return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
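
/*
 * Illustrative sketch (not part of this file): cpumask_of() resolves into
 * this table, yielding a constant single-bit cpumask with no allocation.
 *
 *	const struct cpumask *mask = cpumask_of(5);	// only bit 5 set
 *	// e.g. stop_machine(take_cpu_down, NULL, cpumask_of(cpu)) above
 */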

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
        = {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
        int cpu = smp_processor_id();

        /* Mark the boot cpu "present", "online" etc for SMP and UP case */
        set_cpu_online(cpu, true);
        set_cpu_active(cpu, true);
        set_cpu_present(cpu, true);
        set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
        per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}