[linux-2.6-block.git] / kernel / cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        int refcount;
} cpu_hotplug;

void __init cpu_hotplug_init(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_init(&cpu_hotplug.lock);
        cpu_hotplug.refcount = 0;
}

#ifdef CONFIG_HOTPLUG_CPU

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
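
/*
 * Usage sketch (illustrative only, not part of this file): a reader
 * that needs cpu_online_mask to stay stable brackets its critical
 * section with the refcounted pair above; do_something() is a
 * hypothetical helper:
 *
 *      get_online_cpus();
 *      for_each_online_cpu(cpu)
 *              do_something(cpu);      <- no CPU can come or go here
 *      put_online_cpus();
 */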

#endif  /* CONFIG_HOTPLUG_CPU */

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
static void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

static void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
}
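
/*
 * Writer-side pattern (illustrative sketch; _cpu_down() and _cpu_up()
 * below are the real users):
 *
 *      cpu_maps_update_begin();        <- one writer at a time
 *      cpu_hotplug_begin();            <- wait for readers to drain
 *      ... update cpu_online_mask etc. ...
 *      cpu_hotplug_done();
 *      cpu_maps_update_done();
 */
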
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
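
/*
 * Registration sketch (hypothetical subsystem code, not part of this
 * file; the "foo" names are invented for illustration):
 *
 *      static int __cpuinit foo_cpu_callback(struct notifier_block *nfb,
 *                                            unsigned long action, void *hcpu)
 *      {
 *              unsigned int cpu = (unsigned long)hcpu;
 *
 *              switch (action) {
 *              case CPU_UP_PREPARE:
 *              case CPU_UP_PREPARE_FROZEN:
 *                      ... allocate per-cpu state for cpu ...
 *                      break;
 *              case CPU_DEAD:
 *              case CPU_DEAD_FROZEN:
 *                      ... and release it again ...
 *                      break;
 *              }
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block foo_cpu_notifier __cpuinitdata = {
 *              .notifier_call = foo_cpu_callback,
 *      };
 *
 *      register_cpu_notifier(&foo_cpu_notifier);
 */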

static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu &&
                    (!cputime_eq(p->utime, cputime_zero) ||
                     !cputime_eq(p->stime, cputime_zero)))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                               "(state = %ld, flags = %x)\n",
                               p->comm, task_pid_nr(p), cpu,
                               p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
                                param->hcpu);

        /* Force idle task to run as soon as we yield: it should
           immediately notice cpu is offline and die quickly. */
        sched_idle_next();
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        cpumask_var_t old_allowed;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
                return -ENOMEM;

        cpu_hotplug_begin();
        err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
                                        hcpu, -1, &nr_calls);
        if (err == NOTIFY_BAD) {
                nr_calls--;
                __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                          hcpu, nr_calls, NULL);
                printk("%s: attempt to take down CPU %u failed\n",
                       __func__, cpu);
                err = -EINVAL;
                goto out_release;
        }

        /* Ensure that we are not runnable on dying cpu */
        cpumask_copy(old_allowed, &current->cpus_allowed);
        set_cpus_allowed_ptr(current,
                             cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                            hcpu) == NOTIFY_BAD)
                        BUG();

                goto out_allowed;
        }
        BUG_ON(cpu_online(cpu));

        /* Wait for it to sleep (leaving idle task). */
        while (!idle_cpu(cpu))
                yield();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
                                    hcpu) == NOTIFY_BAD)
                BUG();

        check_for_tasks(cpu);

out_allowed:
        set_cpus_allowed_ptr(current, old_allowed);
out_release:
        cpu_hotplug_done();
        if (!err) {
                if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
                                            hcpu) == NOTIFY_BAD)
                        BUG();
        }
        free_cpumask_var(old_allowed);
        return err;
}
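
/*
 * Notifier traffic generated by a successful _cpu_down() (descriptive
 * summary of the code above):
 *
 *      CPU_DOWN_PREPARE - may be vetoed with NOTIFY_BAD, in which case
 *                         the callbacks that already ran get
 *                         CPU_DOWN_FAILED instead
 *      CPU_DYING        - sent by take_cpu_down() from the stop_machine
 *                         context on the dying cpu
 *      CPU_DEAD         - sent once __cpu_die() has completed
 *      CPU_POST_DEAD    - sent after cpu_hotplug_done(), i.e. with the
 *                         hotplug refcount lock dropped but still under
 *                         cpu_add_remove_lock
 */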

int __ref cpu_down(unsigned int cpu)
{
        int err = 0;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        cpu_clear(cpu, cpu_active_map);

        /*
         * Make sure that all cpus did the reschedule and are not
         * using a stale version of the cpu_active_mask.
         * This is not strictly necessary because the stop_machine()
         * that we run down the line already provides the required
         * synchronization.  But it's really a side effect and we do not
         * want to depend on the innards of stop_machine here.
         */
        synchronize_sched();

        err = _cpu_down(cpu, 0);

        if (cpu_online(cpu))
                cpu_set(cpu, cpu_active_map);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();
        ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
                                        -1, &nr_calls);
        if (ret == NOTIFY_BAD) {
                nr_calls--;
                printk("%s: attempt to bring up CPU %u failed\n",
                       __func__, cpu);
                ret = -EINVAL;
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        cpu_set(cpu, cpu_active_map);

        /* Now call notifier in preparation. */
        raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __raw_notifier_call_chain(&cpu_chain,
                                          CPU_UP_CANCELED | mod, hcpu,
                                          nr_calls, NULL);
        cpu_hotplug_done();

        return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                       "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
                printk(KERN_ERR "please check additional_cpus= boot "
                       "parameter\n");
#endif
                return -EINVAL;
        }

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /* We take down all of the non-boot CPUs in one shot to avoid races
         * with userspace trying to use CPU hotplug at the same time.
         */
        cpumask_clear(frozen_cpus);
        printk("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error) {
                        cpumask_set_cpu(cpu, frozen_cpus);
                        printk("CPU%d is down\n", cpu);
                } else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                               cpu, error);
                        break;
                }
        }
        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        printk("Enabling non-boot CPUs ...\n");
        for_each_cpu(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk("CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }
        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);
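
/*
 * Usage sketch (illustrative; the actual callers live in the suspend
 * code, not in this file): the suspend path runs roughly
 *
 *      error = disable_nonboot_cpus();
 *      if (!error) {
 *              ... suspend with only the boot CPU online ...
 *              enable_nonboot_cpus();
 *      }
 */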
#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
}
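
/*
 * Illustrative call site (hypothetical arch bringup code, not part of
 * this file): a secondary CPU's startup path does, in essence,
 *
 *      notify_cpu_starting(cpu);       <- CPU_STARTING notifiers
 *      set_cpu_online(cpu, true);
 *      local_irq_enable();
 *
 * which honours the rule above: the notifiers run before the new cpu
 * enables interrupts.
 */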

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each bit number nr < NR_CPUS, the single-bit
 * value 1 << nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
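
/*
 * Expansion sketch: MASK_DECLARE_2(0), for example, expands to
 *
 *      [1][0] = 1UL << 0, [2][0] = 1UL << 1
 *
 * so row n (n >= 1) of cpu_bit_bitmap has exactly bit (n - 1) set in
 * its first word and zeroes everywhere else.
 */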

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
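
/*
 * Worked example (illustrative, assuming BITS_PER_LONG == 64 and
 * NR_CPUS == 128, so masks are two longs wide): cpumask_of(67) needs
 * a mask with only bit 67 set.  The lookup in <linux/cpumask.h> does,
 * in essence,
 *
 *      const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *      p -= cpu / BITS_PER_LONG;
 *
 * For cpu == 67 this picks row 1 + 3 == 4, whose first word is
 * 1UL << 3, then backs up 67 / 64 == 1 long into the all-zero tail of
 * row 3.  Read as a two-word mask, p[0] == 0 and p[1] == 1UL << 3:
 * exactly bit 67, at a constant address.  Row 0 being empty is what
 * makes backing out of row 1 safe.
 */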

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online)
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}