x86: dt: Cleanup local apic setup
[linux-block.git] arch/x86/kernel/setup.c
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *
 *  Memory region support
 *	David Parsons <orc@pell.chi.il.us>, July-August 1999
 *
 *  Added E820 sanitization routine (removes overlapping memory regions);
 *	Brian Moyle <bmoyle@mvista.com>, February 2001
 *
 * Moved CPU detection code to cpu/${cpu}.c
 *	Patrick Mochel <mochel@osdl.org>, March 2002
 *
 *  Provisions for empty E820 memory regions (reported by certain BIOSes).
 *  Alex Achenbach <xela@slit.de>, December 2002.
 *
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/sfi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/mca.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/nodemask.h>
#include <linux/kexec.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/delay.h>

#include <linux/kallsyms.h>
#include <linux/cpufreq.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>

#include <linux/percpu.h>
#include <linux/crash_dump.h>
#include <linux/tboot.h>

#include <video/edid.h>

#include <asm/mtrr.h>
#include <asm/apic.h>
#include <asm/trampoline.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/sections.h>
#include <asm/dmi.h>
#include <asm/io_apic.h>
#include <asm/ist.h>
#include <asm/setup_arch.h>
#include <asm/bios_ebda.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/bugs.h>

#include <asm/system.h>
#include <asm/vsyscall.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>

#include <asm/paravirt.h>
#include <asm/hypervisor.h>
#include <asm/olpc_ofw.h>

#include <asm/percpu.h>
#include <asm/topology.h>
#include <asm/apicdef.h>
#include <asm/amd_nb.h>
#ifdef CONFIG_X86_64
#include <asm/numa_64.h>
#endif
#include <asm/mce.h>
#include <asm/alternative.h>
#include <asm/prom.h>

/*
 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
 * The direct mapping extends to max_pfn_mapped, so that we can directly access
 * apertures, ACPI and other tables without having to play with fixmaps.
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

#ifdef CONFIG_DMI
RESERVE_BRK(dmi_alloc, 65536);
#endif


static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;

#ifdef CONFIG_X86_64
int default_cpu_present_to_apicid(int mps_cpu)
{
	return __default_cpu_present_to_apicid(mps_cpu);
}

int default_check_phys_apicid_present(int phys_apicid)
{
	return __default_check_phys_apicid_present(phys_apicid);
}
#endif

#ifndef CONFIG_DEBUG_BOOT_PARAMS
struct boot_params __initdata boot_params;
#else
struct boot_params boot_params;
#endif

/*
 * Machine setup..
 */
static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};


#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1};
/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1};
EXPORT_SYMBOL(boot_cpu_data);
static void set_mca_bus(int x)
{
#ifdef CONFIG_MCA
	MCA_bus = x;
#endif
}

unsigned int def_to_bigsmp;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif

#else
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
	.x86_phys_bits = MAX_PHYSMEM_BITS,
};
EXPORT_SYMBOL(boot_cpu_data);
#endif


#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
unsigned long mmu_cr4_features;
#else
unsigned long mmu_cr4_features = X86_CR4_PAE;
#endif

/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void __init copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void __init copy_edd(void)
{
}
#endif

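/*
 * A minimal usage sketch for the early brk allocator below: RESERVE_BRK()
 * (e.g. the dmi_alloc reservation above) grows the brk area at link time,
 * and early boot code can then carve zeroed, aligned chunks out of it, as
 * in a hypothetical caller doing
 *
 *	void *buf = extend_brk(256, 16);
 *
 * which returns 256 zeroed bytes aligned to 16.  Such calls are only valid
 * until reserve_brk() runs from setup_arch(); afterwards _brk_start is zero
 * and extend_brk()'s BUG_ON() fires.
 */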
void * __init extend_brk(size_t size, size_t align)
{
	size_t mask = align - 1;
	void *ret;

	BUG_ON(_brk_start == 0);
	BUG_ON(align & mask);

	_brk_end = (_brk_end + mask) & ~mask;
	BUG_ON((char *)(_brk_end + size) > __brk_limit);

	ret = (void *)_brk_end;
	_brk_end += size;

	memset(ret, 0, size);

	return ret;
}

#ifdef CONFIG_X86_64
static void __init init_gbpages(void)
{
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
}
#else
static inline void init_gbpages(void)
{
}
#endif

static void __init reserve_brk(void)
{
	if (_brk_end > _brk_start)
		memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK");

	/* Mark brk area as locked down and no longer taking any
	   new allocations */
	_brk_start = 0;
}

#ifdef CONFIG_BLK_DEV_INITRD

#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
static void __init relocate_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
	u64 area_size     = PAGE_ALIGN(ramdisk_size);
	u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
	u64 ramdisk_here;
	unsigned long slop, clen, mapaddr;
	char *p, *q;

	/* We need to move the initrd down into lowmem */
	ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size,
					      PAGE_SIZE);

	if (ramdisk_here == MEMBLOCK_ERROR)
		panic("Cannot find place for new RAMDISK of size %lld\n",
		      ramdisk_size);

	/* Note: this includes all the lowmem currently occupied by
	   the initrd, we rely on that fact to keep the data intact. */
	memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK");
	initrd_start = ramdisk_here + PAGE_OFFSET;
	initrd_end   = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
	       ramdisk_here, ramdisk_here + ramdisk_size);

	q = (char *)initrd_start;

	/* Copy any lowmem portion of the initrd */
	if (ramdisk_image < end_of_lowmem) {
		clen = end_of_lowmem - ramdisk_image;
		p = (char *)__va(ramdisk_image);
		memcpy(q, p, clen);
		q += clen;
		ramdisk_image += clen;
		ramdisk_size  -= clen;
	}

	/* Copy the highmem portion of the initrd */
	while (ramdisk_size) {
		slop = ramdisk_image & ~PAGE_MASK;
		clen = ramdisk_size;
		if (clen > MAX_MAP_CHUNK-slop)
			clen = MAX_MAP_CHUNK-slop;
		mapaddr = ramdisk_image & PAGE_MASK;
		p = early_memremap(mapaddr, clen+slop);
		memcpy(q, p+slop, clen);
		early_iounmap(p, clen+slop);
		q += clen;
		ramdisk_image += clen;
		ramdisk_size  -= clen;
	}
	/* high pages is not converted by early_res_to_bootmem */
	ramdisk_image = boot_params.hdr.ramdisk_image;
	ramdisk_size  = boot_params.hdr.ramdisk_size;
	printk(KERN_INFO "Move RAMDISK from %016llx - %016llx to"
		" %08llx - %08llx\n",
		ramdisk_image, ramdisk_image + ramdisk_size - 1,
		ramdisk_here, ramdisk_here + ramdisk_size - 1);
}

static void __init reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
	u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	initrd_start = 0;

	if (ramdisk_size >= (end_of_lowmem>>1)) {
		memblock_x86_free_range(ramdisk_image, ramdisk_end);
		printk(KERN_ERR "initrd too large to handle, "
		       "disabling initrd\n");
		return;
	}

	printk(KERN_INFO "RAMDISK: %08llx - %08llx\n", ramdisk_image,
			ramdisk_end);


	if (ramdisk_end <= end_of_lowmem) {
		/* All in lowmem, easy case */
		/*
		 * don't need to reserve again, already reserved early
		 * in i386_start_kernel
		 */
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end = initrd_start + ramdisk_size;
		return;
	}

	relocate_initrd();

	memblock_x86_free_range(ramdisk_image, ramdisk_end);
}
#else
static void __init reserve_initrd(void)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */

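/*
 * The setup_data handlers below walk a singly linked list of blobs passed
 * in by the boot loader.  Each node follows the layout from
 * <asm/bootparam.h>, shown here for reference:
 *
 *	struct setup_data {
 *		__u64 next;	physical address of the next node, 0 = end
 *		__u32 type;	e.g. SETUP_E820_EXT, SETUP_DTB
 *		__u32 len;	length of data[] in bytes
 *		__u8  data[0];
 *	};
 *
 * boot_params.hdr.setup_data holds the physical address of the first node.
 */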
static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	if (boot_params.hdr.version < 0x0209)
		return;
	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		u32 data_len, map_len;

		map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
			      (u64)sizeof(struct setup_data));
		data = early_memremap(pa_data, map_len);
		data_len = data->len + sizeof(struct setup_data);
		if (data_len > map_len) {
			early_iounmap(data, map_len);
			data = early_memremap(pa_data, data_len);
			map_len = data_len;
		}

		switch (data->type) {
		case SETUP_E820_EXT:
			parse_e820_ext(data);
			break;
		case SETUP_DTB:
			add_dtb(pa_data);
			break;
		default:
			break;
		}
		pa_data = data->next;
		early_iounmap(data, map_len);
	}
}

static void __init e820_reserve_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;
	int found = 0;

	if (boot_params.hdr.version < 0x0209)
		return;
	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		e820_update_range(pa_data, sizeof(*data)+data->len,
			 E820_RAM, E820_RESERVED_KERN);
		found = 1;
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}
	if (!found)
		return;

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	memcpy(&e820_saved, &e820, sizeof(struct e820map));
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("reserve setup_data");
}

static void __init memblock_x86_reserve_range_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;
	char buf[32];

	if (boot_params.hdr.version < 0x0209)
		return;
	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		sprintf(buf, "setup data %x", data->type);
		memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf);
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}
}

/*
 * --------- Crashkernel reservation ------------------------------
 */

#ifdef CONFIG_KEXEC

static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_pfn - min_low_pfn;

	return total << PAGE_SHIFT;
}

/*
 * Keep the crash kernel below this limit.  On 32 bits earlier kernels
 * would limit the kernel to the low 512 MiB due to mapping restrictions.
 * On 64 bits, kexec-tools currently limits us to 896 MiB; increase this
 * limit once kexec-tools are fixed.
 */
#ifdef CONFIG_X86_32
# define CRASH_KERNEL_ADDR_MAX	(512 << 20)
#else
# define CRASH_KERNEL_ADDR_MAX	(896 << 20)
#endif

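/*
 * reserve_crashkernel() acts on the crashkernel= command line option as
 * parsed by parse_crashkernel().  Typical forms (see
 * Documentation/kdump/kdump.txt) look like:
 *
 *	crashkernel=128M		  reserve 128 MiB, base chosen here
 *	crashkernel=128M@16M		  reserve 128 MiB starting at 16 MiB
 *	crashkernel=512M-2G:64M,2G-:128M  size depends on total memory
 *
 * A base of 0 (the first form) means the address is picked automatically
 * below CRASH_KERNEL_ADDR_MAX.
 */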
static void __init reserve_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();

	ret = parse_crashkernel(boot_command_line, total_mem,
			&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	/* 0 means: find the address automatically */
	if (crash_base <= 0) {
		const unsigned long long alignment = 16<<20;	/* 16M */

		/*
		 *  kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
		 */
		crash_base = memblock_find_in_range(alignment,
			       CRASH_KERNEL_ADDR_MAX, crash_size, alignment);

		if (crash_base == MEMBLOCK_ERROR) {
			pr_info("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
				 crash_base + crash_size, crash_size, 1<<20);
		if (start != crash_base) {
			pr_info("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}
	memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL");

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			"for crashkernel (System RAM: %ldMB)\n",
			(unsigned long)(crash_size >> 20),
			(unsigned long)(crash_base >> 20),
			(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end   = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif

static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

void __init reserve_standard_io_resources(void)
{
	int i;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);

}

/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */

#ifdef CONFIG_CRASH_DUMP
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
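/* For example, kexec-tools typically appends something like
 * "elfcorehdr=0x6f000000" (an illustrative address; memparse() also accepts
 * K/M/G suffixes) to the capture kernel's command line.
 */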
static int __init setup_elfcorehdr(char *arg)
{
	char *end;
	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif

static __init void reserve_ibft_region(void)
{
	unsigned long addr, size = 0;

	addr = find_ibft_region(&size);

	if (size)
		memblock_x86_reserve_range(addr, addr + size, "* ibft");
}

static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;

static void __init trim_bios_range(void)
{
	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 *
	 * This typically reserves additional memory (64KiB by default)
	 * since some BIOSes are known to corrupt low memory.  See the
	 * Kconfig help text for X86_RESERVE_LOW.
	 */
	e820_update_range(0, ALIGN(reserve_low, PAGE_SIZE),
			  E820_RAM, E820_RESERVED);

	/*
	 * special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 * take them out.
	 */
	e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}

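/*
 * Example: booting with "reservelow=640k" marks the entire sub-640K range
 * as reserved instead of the CONFIG_X86_RESERVE_LOW default used above;
 * the value is clamped to the 4K..640K range below.
 */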
static int __init parse_reservelow(char *p)
{
	unsigned long long size;

	if (!p)
		return -EINVAL;

	size = memparse(p, &p);

	if (size < 4096)
		size = 4096;

	if (size > 640*1024)
		size = 640*1024;

	reserve_low = size;

	return 0;
}

early_param("reservelow", parse_reservelow);

static u64 __init get_max_mapped(void)
{
	u64 end = max_pfn_mapped;

	end <<= PAGE_SHIFT;

	return end;
}

/*
 * Determine if we were loaded by an EFI loader. If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
 * for initialization. Note, the efi init code path is determined by the
 * global efi_enabled. This allows the same kernel image to be used on existing
 * systems (with a traditional BIOS) as well as on EFI systems.
 */
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */

void __init setup_arch(char **cmdline_p)
{
	int acpi = 0;
	int amd = 0;
	unsigned long flags;

#ifdef CONFIG_X86_32
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
	visws_early_detect();

	/*
	 * copy kernel address range established so far and switch
	 * to the proper swapper page table
	 */
	clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#else
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
#endif

	/*
	 * If we have OLPC OFW, we might end up relocating the fixmap due to
	 * reserve_top(), so do this before touching the ioremap area.
	 */
	olpc_ofw_detect();

	early_trap_init();
	early_cpu_init();
	early_ioremap_init();

	setup_olpc_ofw_pgd();

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
	if (boot_params.sys_desc_table.length != 0) {
		set_mca_bus(boot_params.sys_desc_table.table[3] & 0x2);
		machine_id = boot_params.sys_desc_table.table[0];
		machine_submodel_id = boot_params.sys_desc_table.table[1];
		BIOS_revision = boot_params.sys_desc_table.table[2];
	}
#endif
	saved_video_mode = boot_params.hdr.vid_mode;
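	/*
	 * Per the x86 boot protocol, type_of_loader packs the boot loader ID
	 * in the high nibble and its version in the low nibble; an ID nibble
	 * of 0xE means the real ID is ext_loader_type + 0x10, and
	 * ext_loader_ver carries the upper bits of the version number,
	 * which is what the code below unpacks.
	 */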
	bootloader_type = boot_params.hdr.type_of_loader;
	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version  = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
#ifdef CONFIG_X86_32
		     "EL32",
#else
		     "EL64",
#endif
	 4)) {
		efi_enabled = 1;
		efi_memblock_x86_reserve_range();
	}
#endif

	x86_init.oem.arch_setup();

	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	setup_memory_map();
	parse_setup_data();
	/* update the e820_saved too */
	e820_reserve_setup_data();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = _brk_end;

	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(_etext)-1;
	data_resource.start = virt_to_phys(_etext);
	data_resource.end = virt_to_phys(_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* append boot loader cmdline to builtin */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	/*
	 * x86_configure_nx() is called before parse_early_param() to detect
	 * whether hardware doesn't support NX (so that the early EHCI debug
	 * console setup can safely call set_fixmap()). It may then be called
	 * again from within noexec_setup() during parsing early parameters
	 * to honor the respective command line option.
	 */
	x86_configure_nx();

	parse_early_param();

	x86_report_nx();

	/* after early param, so could get panic from serial */
	memblock_x86_reserve_range_setup_data();

	if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
		disable_apic = 1;
#endif
		setup_clear_cpu_cap(X86_FEATURE_APIC);
	}

#ifdef CONFIG_PCI
	if (pci_early_dump_regs)
		early_dump_pci_devices();
#endif

	finish_e820_parsing();

	if (efi_enabled)
		efi_init();

	dmi_scan_machine();

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_scan_machine, for the BP.
	 */
	init_hypervisor_platform();

	x86_init.resources.probe_roms();

	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	trim_bios_range();
#ifdef CONFIG_X86_32
	if (ppro_with_ram_bug()) {
		e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
				  E820_RESERVED);
		sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820_print_map("bad_ppro");
	}
#else
	early_gart_iommu_check();
#endif

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	max_pfn = e820_end_of_ram_pfn();

	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820_end_of_ram_pfn();

#ifdef CONFIG_X86_32
	/* max_low_pfn get updated here */
	find_low_pfn_range();
#else
	num_physpages = max_pfn;

	check_x2apic();

	/* How many end-of-memory variables you have, grandma! */
	/* need this before calling reserve_initrd */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820_end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();

	reserve_ibft_region();

	/*
	 * Need to conclude brk, before memblock_x86_fill()
	 *  it could use memblock_find_in_range, could overlap with
	 *  brk area.
	 */
	reserve_brk();

	memblock.current_limit = get_max_mapped();
	memblock_x86_fill();

	/* preallocate 4k for mptable mpc */
	early_reserve_e820_mpc_new();

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif

	printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n",
			max_pfn_mapped<<PAGE_SHIFT);

	reserve_trampoline_memory();

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 * even before init_memory_mapping
	 */
	acpi_reserve_wakeup_memory();
#endif
	init_gbpages();

	/* max_pfn_mapped is updated here */
	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
	max_pfn_mapped = max_low_pfn_mapped;

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		max_pfn_mapped = init_memory_mapping(1UL<<32,
						     max_pfn<<PAGE_SHIFT);
		/* can we preseve max_low_pfn ?*/
		max_low_pfn = max_pfn;
	}
#endif
	memblock.current_limit = get_max_mapped();

	/*
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
	 */

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif

	reserve_initrd();

	reserve_crashkernel();

	vsmp_init();

	io_delay_init();

	/*
	 * Parse the ACPI tables for possible boot-time SMP configuration.
	 */
	acpi_boot_table_init();

	early_acpi_boot_init();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi = acpi_numa_init();
#endif

#ifdef CONFIG_AMD_NUMA
	if (!acpi)
		amd = !amd_numa_init(0, max_pfn);
#endif

	initmem_init(0, max_pfn, acpi, amd);
	memblock_find_dma_reserve();
	dma32_reserve_bootmem();

#ifdef CONFIG_KVM_CLOCK
	kvmclock_init();
#endif

	x86_init.paging.pagetable_setup_start(swapper_pg_dir);
	paging_init();
	x86_init.paging.pagetable_setup_done(swapper_pg_dir);

#ifdef CONFIG_X86_32
	/* sync back kernel address range */
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);
#endif

	tboot_probe();

#ifdef CONFIG_X86_64
	map_vsyscall();
#endif

	generic_apic_probe();

	early_quirks();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
	sfi_init();
	x86_dtb_init();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();

	prefill_possible_map();

#ifdef CONFIG_X86_64
	init_cpu_to_node();
#endif

	init_apic_mappings();
	ioapic_and_gsi_init();

	kvm_guest_init();

	e820_reserve_resources();
	e820_mark_nosave_regions(max_low_pfn);

	x86_init.resources.reserve_resources();

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	x86_init.oem.banner();

	x86_init.timers.wallclock_init();

	mcheck_init();

	local_irq_save(flags);
	arch_init_ideal_nop5();
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32

static struct resource video_ram_resource = {
	.name	= "Video RAM area",
	.start	= 0xa0000,
	.end	= 0xbffff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

void __init i386_reserve_resources(void)
{
	request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}

#endif /* CONFIG_X86_32 */