Merge branch 'linus' into cpus4096
[linux-2.6-block.git] / kernel / cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

/*
 * Represents all CPUs present in the system.
 * In systems capable of hotplug, this map can grow dynamically as new
 * CPUs are detected via a platform-specific method such as ACPI.
 */
cpumask_t cpu_present_map __read_mostly;
EXPORT_SYMBOL(cpu_present_map);

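/*
 * Usage sketch (illustrative, not a caller in this file): readers walk
 * the present map with the generic cpumask helpers, e.g.
 *
 *	int cpu;
 *
 *	for_each_present_cpu(cpu)
 *		pr_debug("CPU%d is present\n", cpu);
 */
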
#ifndef CONFIG_SMP

/*
 * Represents all CPUs that are currently online.
 */
cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_possible_map);

#else /* CONFIG_SMP */

/* Serializes the updates to cpu_online_map, cpu_present_map */
static DEFINE_MUTEX(cpu_add_remove_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug;

void __init cpu_hotplug_init(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_init(&cpu_hotplug.lock);
	cpu_hotplug.refcount = 0;
}

cpumask_t cpu_active_map;

#ifdef CONFIG_HOTPLUG_CPU

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);

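/*
 * Read-side usage sketch (illustrative; do_per_cpu_work() is a
 * hypothetical helper): code that needs cpu_online_map to stay stable
 * brackets the region with the pair above:
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_work(cpu);
 *	put_online_cpus();
 *
 * Readers nest via the refcount; the single hotplug writer waits in
 * cpu_hotplug_begin() until the refcount reaches zero.
 */
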
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The following two APIs must be used when attempting to serialize the
 * updates to cpu_online_map and cpu_present_map.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, any new readers
 * will be blocked by cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}
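
/*
 * Writer-side pairing as used later in this file (sketch): the hotplug
 * paths take cpu_add_remove_lock first and only then exclude readers:
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();
 *	... modify cpu_online_map / cpu_present_map ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 *
 * See _cpu_down()/_cpu_up() and their cpu_down()/cpu_up() wrappers below.
 */
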
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

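/*
 * Example notifier (illustrative; foo_cpu_callback and its actions are
 * hypothetical, but the shape matches what register_cpu_notifier() takes):
 *
 *	static int __cpuinit foo_cpu_callback(struct notifier_block *nfb,
 *					      unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_ONLINE:
 *		case CPU_ONLINE_FROZEN:
 *			// set up per-cpu state for 'cpu'
 *			break;
 *		case CPU_DEAD:
 *		case CPU_DEAD_FROZEN:
 *			// tear down per-cpu state for 'cpu'
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block __cpuinitdata foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 * registered at init time with register_cpu_notifier(&foo_cpu_notifier).
 */
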
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
			       "(state = %ld, flags = %x)\n",
			       p->comm, task_pid_nr(p), cpu,
			       p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
				param->hcpu);
	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	struct task_struct *p;
	cpumask_t old_allowed, tmp;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
					hcpu, -1, &nr_calls);
	if (err == NOTIFY_BAD) {
		nr_calls--;
		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					  hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
		       __func__, cpu);
		err = -EINVAL;
		goto out_release;
	}

	/* Ensure that we are not runnable on the dying cpu */
	old_allowed = current->cpus_allowed;
	cpus_setall(tmp);
	cpu_clear(cpu, tmp);
	set_cpus_allowed_ptr(current, &tmp);

	p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);

	if (IS_ERR(p) || cpu_online(cpu)) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();

		if (IS_ERR(p)) {
			err = PTR_ERR(p);
			goto out_allowed;
		}
		goto out_thread;
	}

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
				    hcpu) == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_thread:
	err = kthread_stop(p);
out_allowed:
	set_cpus_allowed_ptr(current, &old_allowed);
out_release:
	cpu_hotplug_done();
	if (!err) {
		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();
	}
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err = 0;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	cpu_clear(cpu, cpu_active_map);

	/*
	 * Make sure all CPUs have gone through a reschedule and are no
	 * longer using a stale version of cpu_active_map.  This is not
	 * strictly necessary because the stop_machine() we run further
	 * down already provides the required synchronization, but that
	 * is a side effect and we do not want to depend on the innards
	 * of stop_machine() here.
	 */
	synchronize_sched();

	err = _cpu_down(cpu, 0);

	if (cpu_online(cpu))
		cpu_set(cpu, cpu_active_map);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
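
/*
 * Run-time offlining enters through cpu_down(); the usual caller is the
 * sysfs "online" attribute (echo 0 > /sys/devices/system/cpu/cpuN/online),
 * though that path lives outside this file.  Illustrative call:
 *
 *	if (cpu_down(3))	// 0 on success, -errno on failure
 *		printk(KERN_ERR "failed to offline CPU 3\n");
 */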
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
					-1, &nr_calls);
	if (ret == NOTIFY_BAD) {
		nr_calls--;
		printk("%s: attempt to bring up CPU %u failed\n",
		       __func__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__raw_notifier_call_chain(&cpu_chain,
				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;
	if (!cpu_isset(cpu, cpu_possible_map)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

	if (cpu_online(cpu))
		cpu_set(cpu, cpu_active_map);

out:
	cpu_maps_update_done();
	return err;
}

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = first_cpu(cpu_online_map);
	/* We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpus_clear(frozen_cpus);
	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error) {
			cpu_set(cpu, frozen_cpus);
			printk("CPU%d is down\n", cpu);
		} else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}
	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpus_empty(frozen_cpus))
		goto out;

	printk("Enabling non-boot CPUs ...\n");
	for_each_cpu_mask_nr(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}
	cpus_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
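
/*
 * Illustrative pairing (the callers live in the suspend/hibernate core,
 * not in this file): the PM code offlines every CPU but the boot CPU
 * before creating or restoring the image and brings the frozen CPUs back
 * afterwards:
 *
 *	error = disable_nonboot_cpus();
 *	if (!error) {
 *		... do the single-CPU suspend/hibernate work ...
 *		enable_nonboot_cpus();
 *	}
 */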
#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */

/* 64 bits of zeros, for initializers. */
#if BITS_PER_LONG == 32
#define Z64 0, 0
#else
#define Z64 0
#endif

/* Initializer macros. */
#define CMI0(n) { .bits = { 1UL << (n) } }
#define CMI(n, ...) { .bits = { __VA_ARGS__, 1UL << ((n) % BITS_PER_LONG) } }

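/*
 * Worked example of the initializers above (assuming BITS_PER_LONG == 64,
 * so Z64 is a single 0):
 *
 *	CMI0(2)    -> { .bits = { 1UL << (2) } }
 *	              a cpumask with only bit 2 (CPU 2) set.
 *	CMI(65, 0) -> { .bits = { 0, 1UL << ((65) % BITS_PER_LONG) } }
 *	              word 0 is zero, bit 1 of word 1 is set, i.e. CPU 65.
 */
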
#define CMI8(n, ...) \
	CMI((n), __VA_ARGS__), CMI((n)+1, __VA_ARGS__), \
	CMI((n)+2, __VA_ARGS__), CMI((n)+3, __VA_ARGS__), \
	CMI((n)+4, __VA_ARGS__), CMI((n)+5, __VA_ARGS__), \
	CMI((n)+6, __VA_ARGS__), CMI((n)+7, __VA_ARGS__)

#if BITS_PER_LONG == 32
#define CMI64(n, ...) \
	CMI8((n), __VA_ARGS__), CMI8((n)+8, __VA_ARGS__), \
	CMI8((n)+16, __VA_ARGS__), CMI8((n)+24, __VA_ARGS__), \
	CMI8((n)+32, 0, __VA_ARGS__), CMI8((n)+40, 0, __VA_ARGS__), \
	CMI8((n)+48, 0, __VA_ARGS__), CMI8((n)+56, 0, __VA_ARGS__)
#else
#define CMI64(n, ...) \
	CMI8((n), __VA_ARGS__), CMI8((n)+8, __VA_ARGS__), \
	CMI8((n)+16, __VA_ARGS__), CMI8((n)+24, __VA_ARGS__), \
	CMI8((n)+32, __VA_ARGS__), CMI8((n)+40, __VA_ARGS__), \
	CMI8((n)+48, __VA_ARGS__), CMI8((n)+56, __VA_ARGS__)
#endif

#define CMI256(n, ...) \
	CMI64((n), __VA_ARGS__), CMI64((n)+64, Z64, __VA_ARGS__), \
	CMI64((n)+128, Z64, Z64, __VA_ARGS__), \
	CMI64((n)+192, Z64, Z64, Z64, __VA_ARGS__)
#define Z256 Z64, Z64, Z64, Z64

#define CMI1024(n, ...) \
	CMI256((n), __VA_ARGS__), \
	CMI256((n)+256, Z256, __VA_ARGS__), \
	CMI256((n)+512, Z256, Z256, __VA_ARGS__), \
	CMI256((n)+768, Z256, Z256, Z256, __VA_ARGS__)
#define Z1024 Z256, Z256, Z256, Z256

/* We want this statically initialized, just to be safe.  We try not
 * to waste too much space, either. */
static const cpumask_t cpumask_map[]
#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
__initdata
#endif
= {
	CMI0(0), CMI0(1), CMI0(2), CMI0(3),
#if NR_CPUS > 4
	CMI0(4), CMI0(5), CMI0(6), CMI0(7),
#endif
#if NR_CPUS > 8
	CMI0(8), CMI0(9), CMI0(10), CMI0(11),
	CMI0(12), CMI0(13), CMI0(14), CMI0(15),
#endif
#if NR_CPUS > 16
	CMI0(16), CMI0(17), CMI0(18), CMI0(19),
	CMI0(20), CMI0(21), CMI0(22), CMI0(23),
	CMI0(24), CMI0(25), CMI0(26), CMI0(27),
	CMI0(28), CMI0(29), CMI0(30), CMI0(31),
#endif
#if NR_CPUS > 32
#if BITS_PER_LONG == 32
	CMI(32, 0), CMI(33, 0), CMI(34, 0), CMI(35, 0),
	CMI(36, 0), CMI(37, 0), CMI(38, 0), CMI(39, 0),
	CMI(40, 0), CMI(41, 0), CMI(42, 0), CMI(43, 0),
	CMI(44, 0), CMI(45, 0), CMI(46, 0), CMI(47, 0),
	CMI(48, 0), CMI(49, 0), CMI(50, 0), CMI(51, 0),
	CMI(52, 0), CMI(53, 0), CMI(54, 0), CMI(55, 0),
	CMI(56, 0), CMI(57, 0), CMI(58, 0), CMI(59, 0),
	CMI(60, 0), CMI(61, 0), CMI(62, 0), CMI(63, 0),
#else
	CMI0(32), CMI0(33), CMI0(34), CMI0(35),
	CMI0(36), CMI0(37), CMI0(38), CMI0(39),
	CMI0(40), CMI0(41), CMI0(42), CMI0(43),
	CMI0(44), CMI0(45), CMI0(46), CMI0(47),
	CMI0(48), CMI0(49), CMI0(50), CMI0(51),
	CMI0(52), CMI0(53), CMI0(54), CMI0(55),
	CMI0(56), CMI0(57), CMI0(58), CMI0(59),
	CMI0(60), CMI0(61), CMI0(62), CMI0(63),
#endif /* BITS_PER_LONG == 64 */
#endif
#if NR_CPUS > 64
	CMI64(64, Z64),
#endif
#if NR_CPUS > 128
	CMI64(128, Z64, Z64), CMI64(192, Z64, Z64, Z64),
#endif
#if NR_CPUS > 256
	CMI256(256, Z256),
#endif
#if NR_CPUS > 512
	CMI256(512, Z256, Z256), CMI256(768, Z256, Z256, Z256),
#endif
#if NR_CPUS > 1024
	CMI1024(1024, Z1024),
#endif
#if NR_CPUS > 2048
	CMI1024(2048, Z1024, Z1024), CMI1024(3072, Z1024, Z1024, Z1024),
#endif
#if NR_CPUS > 4096
#error NR_CPUS too big. Fix initializers or set CONFIG_HAVE_CPUMASK_OF_CPU_MAP
#endif
};

const cpumask_t *cpumask_of_cpu_map = cpumask_map;

EXPORT_SYMBOL_GPL(cpumask_of_cpu_map);
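
/*
 * Usage sketch: cpumask_of_cpu_map[cpu] is a constant mask with only
 * 'cpu' set, so for example cpu_isset(2, cpumask_of_cpu_map[2]) is true
 * and every other bit in that mask is clear.  On this branch the
 * cpumask_of_cpu() helper is expected to resolve to an element of this
 * table (an assumption based on the map's name and export, not on code
 * in this file).
 */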