/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include "xen-ops.h"
#include "vdso.h"

/* These are code, but not functions. Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/*
 * The maximum amount of extra memory compared to the base size. The
 * main scaling factor is the size of struct page. At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO (10)

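/*
 * Record [start, start + size) as an extra-memory region (merging with
 * an adjacent region where possible) and invalidate its p2m entries so
 * the range can be populated later.
 */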
static void __init xen_add_extra_mem(u64 start, u64 size)
{
        unsigned long pfn;
        int i;

        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                /* Add new region. */
                if (xen_extra_mem[i].size == 0) {
                        xen_extra_mem[i].start = start;
                        xen_extra_mem[i].size = size;
                        break;
                }
                /* Append to existing region. */
                if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
                        xen_extra_mem[i].size += size;
                        break;
                }
        }
        if (i == XEN_EXTRA_MEM_MAX_REGIONS)
                printk(KERN_WARNING "Warning: not enough extra memory regions\n");

        memblock_reserve(start, size);

        xen_max_p2m_pfn = PFN_DOWN(start + size);
        for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);

                if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
                        continue;
                WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
                     pfn, mfn);

                __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
        }
}

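/*
 * Populate (release == false) or release (release == true) the pfns in
 * [start, end), one extent at a time, keeping the p2m in sync.  Returns
 * the number of pages actually populated or released.
 */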
static unsigned long __init xen_do_chunk(unsigned long start,
                                         unsigned long end, bool release)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = 0,
                .domid = DOMID_SELF
        };
        unsigned long len = 0;
        unsigned long pfn;
        int ret;

        for (pfn = start; pfn < end; pfn++) {
                unsigned long frame;
                unsigned long mfn = pfn_to_mfn(pfn);

                if (release) {
                        /* Make sure pfn exists to start with */
                        if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
                                continue;
                        frame = mfn;
                } else {
                        if (mfn != INVALID_P2M_ENTRY)
                                continue;
                        frame = pfn;
                }
                set_xen_guest_handle(reservation.extent_start, &frame);
                reservation.nr_extents = 1;

                ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
                                           &reservation);
                WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
                     release ? "release" : "populate", pfn, ret);

                if (ret == 1) {
                        if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
                                if (release)
                                        break;
                                set_xen_guest_handle(reservation.extent_start, &frame);
                                reservation.nr_extents = 1;
                                ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                                           &reservation);
                                break;
                        }
                        len++;
                } else
                        break;
        }
        if (len)
                printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
                       release ? "Freeing" : "Populating",
                       start, end, len,
                       release ? "freed" : "added");

        return len;
}

static unsigned long __init xen_release_chunk(unsigned long start,
                                              unsigned long end)
{
        return xen_do_chunk(start, end, true);
}

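/*
 * Walk the E820 map and populate any RAM regions that lie beyond the
 * initial allocation (max_pfn), spending at most credits_left pages.
 * *last_pfn is set just past the last pfn handled; returns the number
 * of pages populated.
 */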
static unsigned long __init xen_populate_chunk(
        const struct e820entry *list, size_t map_size,
        unsigned long max_pfn, unsigned long *last_pfn,
        unsigned long credits_left)
{
        const struct e820entry *entry;
        unsigned int i;
        unsigned long done = 0;
        unsigned long dest_pfn;

        for (i = 0, entry = list; i < map_size; i++, entry++) {
                unsigned long s_pfn;
                unsigned long e_pfn;
                unsigned long pfns;
                long capacity;

                if (credits_left <= 0)
                        break;

                if (entry->type != E820_RAM)
                        continue;

                e_pfn = PFN_DOWN(entry->addr + entry->size);

                /* We only care about E820 after the xen_start_info->nr_pages */
                if (e_pfn <= max_pfn)
                        continue;

                s_pfn = PFN_UP(entry->addr);
                /*
                 * If the E820 falls within the nr_pages, we want to start
                 * at the nr_pages PFN.
                 * If that would mean going past the E820 entry, skip it.
                 */
                if (s_pfn <= max_pfn) {
                        capacity = e_pfn - max_pfn;
                        dest_pfn = max_pfn;
                } else {
                        capacity = e_pfn - s_pfn;
                        dest_pfn = s_pfn;
                }

                if (credits_left < capacity)
                        capacity = credits_left;

                pfns = xen_do_chunk(dest_pfn, dest_pfn + capacity, false);
                done += pfns;
                *last_pfn = (dest_pfn + pfns);
                if (pfns < capacity)
                        break;
                credits_left -= pfns;
        }
        return done;
}
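/*
 * Set the pfns in [start_pfn, end_pfn) to identity (1:1) in the p2m,
 * first releasing back to Xen any of those pages that belong to the
 * initial allocation (i.e. below nr_pages).
 */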
static void __init xen_set_identity_and_release_chunk(
        unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
        unsigned long *released, unsigned long *identity)
{
        unsigned long pfn;

        /*
         * If the PFNs are currently mapped, the VA mapping also needs
         * to be updated to be 1:1.
         */
        for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
                (void)HYPERVISOR_update_va_mapping(
                        (unsigned long)__va(pfn << PAGE_SHIFT),
                        mfn_pte(pfn, PAGE_KERNEL_IO), 0);

        if (start_pfn < nr_pages)
                *released += xen_release_chunk(
                        start_pfn, min(end_pfn, nr_pages));

        *identity += set_phys_range_identity(start_pfn, end_pfn);
}

static unsigned long __init xen_set_identity_and_release(
        const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
        phys_addr_t start = 0;
        unsigned long released = 0;
        unsigned long identity = 0;
        const struct e820entry *entry;
        int i;

        /*
         * Combine non-RAM regions and gaps until a RAM region (or the
         * end of the map) is reached, then set the 1:1 map and
         * release the pages (if available) in those non-RAM regions.
         *
         * The combined non-RAM regions are rounded to a whole number
         * of pages so any partial pages are accessible via the 1:1
         * mapping. This is needed for some BIOSes that put (for
         * example) the DMI tables in a reserved region that begins on
         * a non-page boundary.
         */
        for (i = 0, entry = list; i < map_size; i++, entry++) {
                phys_addr_t end = entry->addr + entry->size;
                if (entry->type == E820_RAM || i == map_size - 1) {
                        unsigned long start_pfn = PFN_DOWN(start);
                        unsigned long end_pfn = PFN_UP(end);

                        if (entry->type == E820_RAM)
                                end_pfn = PFN_UP(entry->addr);

                        if (start_pfn < end_pfn)
                                xen_set_identity_and_release_chunk(
                                        start_pfn, end_pfn, nr_pages,
                                        &released, &identity);

                        start = end;
                }
        }

        if (released)
                printk(KERN_INFO "Released %lu pages of unused memory\n", released);
        if (identity)
                printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

        return released;
}

static unsigned long __init xen_get_max_pages(void)
{
        unsigned long max_pages = MAX_DOMAIN_PAGES;
        domid_t domid = DOMID_SELF;
        int ret;

        /*
         * For the initial domain we use the maximum reservation as
         * the maximum page.
         *
         * For guest domains the current maximum reservation reflects
         * the current maximum rather than the static maximum. In this
         * case the e820 map provided to us will cover the static
         * maximum region.
         */
        if (xen_initial_domain()) {
                ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
                if (ret > 0)
                        max_pages = ret;
        }

        return min(max_pages, MAX_DOMAIN_PAGES);
}

static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
        u64 end = start + size;

        /* Align RAM regions to page boundaries. */
        if (type == E820_RAM) {
                start = PAGE_ALIGN(start);
                end &= ~((u64)PAGE_SIZE - 1);
        }

        e820_add_region(start, end - start, type);
}

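/* Treat UNUSABLE regions as RAM; see the comment in xen_memory_setup(). */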
void xen_ignore_unusable(struct e820entry *list, size_t map_size)
{
        struct e820entry *entry;
        unsigned int i;

        for (i = 0, entry = list; i < map_size; i++, entry++) {
                if (entry->type == E820_UNUSABLE)
                        entry->type = E820_RAM;
        }
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
        static struct e820entry map[E820MAX] __initdata;

        unsigned long max_pfn = xen_start_info->nr_pages;
        unsigned long long mem_end;
        int rc;
        struct xen_memory_map memmap;
        unsigned long max_pages;
        unsigned long last_pfn = 0;
        unsigned long extra_pages = 0;
        unsigned long populated;
        int i;
        int op;

        max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
        mem_end = PFN_PHYS(max_pfn);

        memmap.nr_entries = E820MAX;
        set_xen_guest_handle(memmap.buffer, map);

        op = xen_initial_domain() ?
                XENMEM_machine_memory_map :
                XENMEM_memory_map;
        rc = HYPERVISOR_memory_op(op, &memmap);
        if (rc == -ENOSYS) {
                BUG_ON(xen_initial_domain());
                memmap.nr_entries = 1;
                map[0].addr = 0ULL;
                map[0].size = mem_end;
                /* 8MB slack (to balance backend allocations). */
                map[0].size += 8ULL << 20;
                map[0].type = E820_RAM;
                rc = 0;
        }
        BUG_ON(rc);

        /*
         * Xen won't allow a 1:1 mapping to be created to UNUSABLE
         * regions, so if we're using the machine memory map leave the
         * region as RAM as it is in the pseudo-physical map.
         *
         * UNUSABLE regions in domUs are not handled and will need
         * a patch in the future.
         */
        if (xen_initial_domain())
                xen_ignore_unusable(map, memmap.nr_entries);

        /* Make sure the Xen-supplied memory map is well-ordered. */
        sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

        max_pages = xen_get_max_pages();
        if (max_pages > max_pfn)
                extra_pages += max_pages - max_pfn;

        /*
         * Set P2M for all non-RAM pages and E820 gaps to be identity
         * type PFNs. Any RAM pages that would be made inaccessible by
         * this are first released.
         */
        xen_released_pages = xen_set_identity_and_release(
                map, memmap.nr_entries, max_pfn);

        /*
         * Populate back the non-RAM pages and E820 gaps that had been
         * released.
         */
        populated = xen_populate_chunk(map, memmap.nr_entries,
                                       max_pfn, &last_pfn, xen_released_pages);

        xen_released_pages -= populated;
        extra_pages += xen_released_pages;

        if (last_pfn > max_pfn) {
                max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
                mem_end = PFN_PHYS(max_pfn);
        }
        /*
         * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
         * factor of the base size. On non-highmem systems, the base
         * size is the full initial memory allocation; on highmem it
         * is limited to the max size of lowmem, so that it doesn't
         * get completely filled.
         *
         * In principle there could be a problem in lowmem systems if
         * the initial memory is also very large with respect to
         * lowmem, but we won't try to deal with that here.
         */
        extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
                          extra_pages);
        i = 0;
        while (i < memmap.nr_entries) {
                u64 addr = map[i].addr;
                u64 size = map[i].size;
                u32 type = map[i].type;

                if (type == E820_RAM) {
                        if (addr < mem_end) {
                                size = min(size, mem_end - addr);
                        } else if (extra_pages) {
                                size = min(size, (u64)extra_pages * PAGE_SIZE);
                                extra_pages -= size / PAGE_SIZE;
                                xen_add_extra_mem(addr, size);
                        } else
                                type = E820_UNUSABLE;
                }

                xen_align_and_add_e820_region(addr, size, type);

                map[i].addr += size;
                map[i].size -= size;
                if (map[i].size == 0)
                        i++;
        }

        /*
         * In domU, the ISA region is normal, usable memory, but we
         * reserve ISA memory anyway because too many things poke
         * about in there.
         */
        e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
                        E820_RESERVED);

        /*
         * Reserve Xen bits:
         *  - mfn_list
         *  - xen_start_info
         * See comment above "struct start_info" in <xen/interface/xen.h>
         * We tried to make the memblock_reserve more selective so
         * that it would be clear what region is reserved. Sadly we ran
         * into the problem wherein on a 64-bit hypervisor with a 32-bit
         * initial domain, the pt_base has the cr3 value which is not
         * necessarily where the pagetable starts! As Jan put it: "
         * Actually, the adjustment turns out to be correct: The page
         * tables for a 32-on-64 dom0 get allocated in the order "first L1",
         * "first L2", "first L3", so the offset to the page table base is
         * indeed 2. When reading xen/include/public/xen.h's comment
         * very strictly, this is not a violation (since there nothing is said
         * that the first thing in the page table space is pointed to by
         * pt_base; I admit that this seems to be implied though, namely
         * do I think that it is implied that the page table space is the
         * range [pt_base, pt_base + nr_pt_frames), whereas that
         * range here indeed is [pt_base - 2, pt_base - 2 + nr_pt_frames),
         * which - without a priori knowledge - the kernel would have
         * difficulty to figure out)." - so let's just fall back to the
         * easy way and reserve the whole region.
         */
        memblock_reserve(__pa(xen_start_info->mfn_list),
                         xen_start_info->pt_base - xen_start_info->mfn_list);

        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

        return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
        u32 *mask;
        mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
        mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

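/* Register a callback with Xen, with event delivery masked on entry. */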
static int register_callback(unsigned type, const void *func)
{
        struct callback_register callback = {
                .type = type,
                .address = XEN_CALLBACK(__KERNEL_CS, func),
                .flags = CALLBACKF_mask_events,
        };

        return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void xen_enable_sysenter(void)
{
        int ret;
        unsigned sysenter_feature;

#ifdef CONFIG_X86_32
        sysenter_feature = X86_FEATURE_SEP;
#else
        sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

        if (!boot_cpu_has(sysenter_feature))
                return;

        ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
        if (ret != 0)
                setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
        int ret;

        ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
        if (ret != 0) {
                printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
                /*
                 * Pretty fatal; 64-bit userspace has no other
                 * mechanism for syscalls.
                 */
        }

        if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
                ret = register_callback(CALLBACKTYPE_syscall32,
                                        xen_syscall32_target);
                if (ret != 0)
                        setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
        }
#endif /* CONFIG_X86_64 */
}

void __init xen_arch_setup(void)
{
        xen_panic_handler_init();

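        /* Enable the 4GB-segments and writable-pagetables assists. */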
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

        if (!xen_feature(XENFEAT_auto_translated_physmap))
                HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                     VMASST_TYPE_pae_extended_cr3);

        if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
            register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
                BUG();

        xen_enable_sysenter();
        xen_enable_syscall();

#ifdef CONFIG_ACPI
        if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
                printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
                disable_acpi();
        }
#endif

        memcpy(boot_command_line, xen_start_info->cmd_line,
               MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
               COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

        /* Set up idle, making sure it calls safe_halt() pvop */
        disable_cpuidle();
        disable_cpufreq();
        WARN_ON(xen_set_default_idle());
        fiddle_vdso();
#ifdef CONFIG_NUMA
        numa_off = 1;
#endif
}