[PATCH] sparsemem memory model for i386
arch/i386/kernel/setup.c
1/*
2 * linux/arch/i386/kernel/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 *
6 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
7 *
8 * Memory region support
9 * David Parsons <orc@pell.chi.il.us>, July-August 1999
10 *
11 * Added E820 sanitization routine (removes overlapping memory regions);
12 * Brian Moyle <bmoyle@mvista.com>, February 2001
13 *
14 * Moved CPU detection code to cpu/${cpu}.c
15 * Patrick Mochel <mochel@osdl.org>, March 2002
16 *
17 * Provisions for empty E820 memory regions (reported by certain BIOSes).
18 * Alex Achenbach <xela@slit.de>, December 2002.
19 *
20 */
21
22/*
23 * This file handles the architecture-dependent parts of initialization
24 */
25
26#include <linux/sched.h>
27#include <linux/mm.h>
05b79bdc 28#include <linux/mmzone.h>
29#include <linux/tty.h>
30#include <linux/ioport.h>
31#include <linux/acpi.h>
32#include <linux/apm_bios.h>
33#include <linux/initrd.h>
34#include <linux/bootmem.h>
35#include <linux/seq_file.h>
36#include <linux/console.h>
37#include <linux/mca.h>
38#include <linux/root_dev.h>
39#include <linux/highmem.h>
40#include <linux/module.h>
41#include <linux/efi.h>
42#include <linux/init.h>
43#include <linux/edd.h>
44#include <linux/nodemask.h>
45#include <video/edid.h>
46#include <asm/e820.h>
47#include <asm/mpspec.h>
48#include <asm/setup.h>
49#include <asm/arch_hooks.h>
50#include <asm/sections.h>
51#include <asm/io_apic.h>
52#include <asm/ist.h>
53#include <asm/io.h>
54#include "setup_arch_pre.h"
55#include <bios_ebda.h>
56
57/* This value is set up by the early boot code to point to the value
58 immediately after the boot time page tables. It contains a *physical*
59 address, and must not be in the .bss segment! */
60unsigned long init_pg_tables_end __initdata = ~0UL;
61
62int disable_pse __initdata = 0;
63
64/*
65 * Machine setup..
66 */
67
68#ifdef CONFIG_EFI
69int efi_enabled = 0;
70EXPORT_SYMBOL(efi_enabled);
71#endif
72
73/* cpu data as detected by the assembly code in head.S */
74struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
75/* common cpu data for all cpus */
76struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
77
78unsigned long mmu_cr4_features;
79
80#ifdef CONFIG_ACPI_INTERPRETER
81 int acpi_disabled = 0;
82#else
83 int acpi_disabled = 1;
84#endif
85EXPORT_SYMBOL(acpi_disabled);
86
87#ifdef CONFIG_ACPI_BOOT
88int __initdata acpi_force = 0;
89extern acpi_interrupt_flags acpi_sci_flags;
90#endif
91
92/* for MCA, but anyone else can use it if they want */
93unsigned int machine_id;
94unsigned int machine_submodel_id;
95unsigned int BIOS_revision;
96unsigned int mca_pentium_flag;
97
98/* For PCI or other memory-mapped resources */
99unsigned long pci_mem_start = 0x10000000;
100
101/* Boot loader ID as an integer, for the benefit of proc_dointvec */
102int bootloader_type;
103
104/* user-defined highmem size */
105static unsigned int highmem_pages = -1;
106
107/*
108 * Setup options
109 */
110struct drive_info_struct { char dummy[32]; } drive_info;
111struct screen_info screen_info;
112struct apm_info apm_info;
113struct sys_desc_table_struct {
114 unsigned short length;
115 unsigned char table[0];
116};
117struct edid_info edid_info;
118struct ist_info ist_info;
119struct e820map e820;
120
121extern void early_cpu_init(void);
122extern void dmi_scan_machine(void);
123extern void generic_apic_probe(char *);
124extern int root_mountflags;
125
126unsigned long saved_videomode;
127
128#define RAMDISK_IMAGE_START_MASK 0x07FF
129#define RAMDISK_PROMPT_FLAG 0x8000
130#define RAMDISK_LOAD_FLAG 0x4000
131
132static char command_line[COMMAND_LINE_SIZE];
133
134unsigned char __initdata boot_params[PARAM_SIZE];
135
136static struct resource data_resource = {
137 .name = "Kernel data",
138 .start = 0,
139 .end = 0,
140 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
141};
142
143static struct resource code_resource = {
144 .name = "Kernel code",
145 .start = 0,
146 .end = 0,
147 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
148};
149
150static struct resource system_rom_resource = {
151 .name = "System ROM",
152 .start = 0xf0000,
153 .end = 0xfffff,
154 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
155};
156
157static struct resource extension_rom_resource = {
158 .name = "Extension ROM",
159 .start = 0xe0000,
160 .end = 0xeffff,
161 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
162};
163
164static struct resource adapter_rom_resources[] = { {
165 .name = "Adapter ROM",
166 .start = 0xc8000,
167 .end = 0,
168 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
169}, {
170 .name = "Adapter ROM",
171 .start = 0,
172 .end = 0,
173 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
174}, {
175 .name = "Adapter ROM",
176 .start = 0,
177 .end = 0,
178 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
179}, {
180 .name = "Adapter ROM",
181 .start = 0,
182 .end = 0,
183 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
184}, {
185 .name = "Adapter ROM",
186 .start = 0,
187 .end = 0,
188 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
189}, {
190 .name = "Adapter ROM",
191 .start = 0,
192 .end = 0,
193 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
194} };
195
196#define ADAPTER_ROM_RESOURCES \
197 (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
198
199static struct resource video_rom_resource = {
200 .name = "Video ROM",
201 .start = 0xc0000,
202 .end = 0xc7fff,
203 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
204};
205
206static struct resource video_ram_resource = {
207 .name = "Video RAM area",
208 .start = 0xa0000,
209 .end = 0xbffff,
210 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
211};
212
213static struct resource standard_io_resources[] = { {
214 .name = "dma1",
215 .start = 0x0000,
216 .end = 0x001f,
217 .flags = IORESOURCE_BUSY | IORESOURCE_IO
218}, {
219 .name = "pic1",
220 .start = 0x0020,
221 .end = 0x0021,
222 .flags = IORESOURCE_BUSY | IORESOURCE_IO
223}, {
224 .name = "timer0",
225 .start = 0x0040,
226 .end = 0x0043,
227 .flags = IORESOURCE_BUSY | IORESOURCE_IO
228}, {
229 .name = "timer1",
230 .start = 0x0050,
231 .end = 0x0053,
232 .flags = IORESOURCE_BUSY | IORESOURCE_IO
233}, {
234 .name = "keyboard",
235 .start = 0x0060,
236 .end = 0x006f,
237 .flags = IORESOURCE_BUSY | IORESOURCE_IO
238}, {
239 .name = "dma page reg",
240 .start = 0x0080,
241 .end = 0x008f,
242 .flags = IORESOURCE_BUSY | IORESOURCE_IO
243}, {
244 .name = "pic2",
245 .start = 0x00a0,
246 .end = 0x00a1,
247 .flags = IORESOURCE_BUSY | IORESOURCE_IO
248}, {
249 .name = "dma2",
250 .start = 0x00c0,
251 .end = 0x00df,
252 .flags = IORESOURCE_BUSY | IORESOURCE_IO
253}, {
254 .name = "fpu",
255 .start = 0x00f0,
256 .end = 0x00ff,
257 .flags = IORESOURCE_BUSY | IORESOURCE_IO
258} };
259
260#define STANDARD_IO_RESOURCES \
261 (sizeof standard_io_resources / sizeof standard_io_resources[0])
262
263#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
264
265static int __init romchecksum(unsigned char *rom, unsigned long length)
266{
267 unsigned char *p, sum = 0;
268
269 for (p = rom; p < rom + length; p++)
270 sum += *p;
271 return sum == 0;
272}
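/*
 * Note: the signature and checksum tests here follow the conventional PC
 * option ROM header layout:
 *
 *	offset 0-1: 0x55, 0xAA signature (read as the little-endian word 0xaa55)
 *	offset 2:   image length in 512-byte units (hence the rom[2] * 512 below)
 *	remainder:  all bytes of the image sum to zero modulo 256
 *
 * so a ROM is only claimed when both the signature and the whole-image
 * checksum hold.
 */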
273
274static void __init probe_roms(void)
275{
276 unsigned long start, length, upper;
277 unsigned char *rom;
278 int i;
279
280 /* video rom */
281 upper = adapter_rom_resources[0].start;
282 for (start = video_rom_resource.start; start < upper; start += 2048) {
283 rom = isa_bus_to_virt(start);
284 if (!romsignature(rom))
285 continue;
286
287 video_rom_resource.start = start;
288
289 /* 0 < length <= 0x7f * 512, historically */
290 length = rom[2] * 512;
291
292 /* if checksum okay, trust length byte */
293 if (length && romchecksum(rom, length))
294 video_rom_resource.end = start + length - 1;
295
296 request_resource(&iomem_resource, &video_rom_resource);
297 break;
298 }
299
300 start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
301 if (start < upper)
302 start = upper;
303
304 /* system rom */
305 request_resource(&iomem_resource, &system_rom_resource);
306 upper = system_rom_resource.start;
307
308 /* check for extension rom (ignore length byte!) */
309 rom = isa_bus_to_virt(extension_rom_resource.start);
310 if (romsignature(rom)) {
311 length = extension_rom_resource.end - extension_rom_resource.start + 1;
312 if (romchecksum(rom, length)) {
313 request_resource(&iomem_resource, &extension_rom_resource);
314 upper = extension_rom_resource.start;
315 }
316 }
317
318 /* check for adapter roms on 2k boundaries */
319 for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
320 rom = isa_bus_to_virt(start);
321 if (!romsignature(rom))
322 continue;
323
324 /* 0 < length <= 0x7f * 512, historically */
325 length = rom[2] * 512;
326
327 /* but accept any length that fits if checksum okay */
328 if (!length || start + length > upper || !romchecksum(rom, length))
329 continue;
330
331 adapter_rom_resources[i].start = start;
332 adapter_rom_resources[i].end = start + length - 1;
333 request_resource(&iomem_resource, &adapter_rom_resources[i]);
334
335 start = adapter_rom_resources[i++].end & ~2047UL;
336 }
337}
338
339static void __init limit_regions(unsigned long long size)
340{
341 unsigned long long current_addr = 0;
342 int i;
343
344 if (efi_enabled) {
345 for (i = 0; i < memmap.nr_map; i++) {
346 current_addr = memmap.map[i].phys_addr +
347 (memmap.map[i].num_pages << 12);
348 if (memmap.map[i].type == EFI_CONVENTIONAL_MEMORY) {
349 if (current_addr >= size) {
350 memmap.map[i].num_pages -=
351 (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
352 memmap.nr_map = i + 1;
353 return;
354 }
355 }
356 }
357 }
358 for (i = 0; i < e820.nr_map; i++) {
359 if (e820.map[i].type == E820_RAM) {
360 current_addr = e820.map[i].addr + e820.map[i].size;
361 if (current_addr >= size) {
362 e820.map[i].size -= current_addr-size;
363 e820.nr_map = i + 1;
364 return;
365 }
366 }
367 }
368}
369
370static void __init add_memory_region(unsigned long long start,
371 unsigned long long size, int type)
372{
373 int x;
374
375 if (!efi_enabled) {
376 x = e820.nr_map;
377
378 if (x == E820MAX) {
379 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
380 return;
381 }
382
383 e820.map[x].addr = start;
384 e820.map[x].size = size;
385 e820.map[x].type = type;
386 e820.nr_map++;
387 }
388} /* add_memory_region */
389
390#define E820_DEBUG 1
391
392static void __init print_memory_map(char *who)
393{
394 int i;
395
396 for (i = 0; i < e820.nr_map; i++) {
397 printk(" %s: %016Lx - %016Lx ", who,
398 e820.map[i].addr,
399 e820.map[i].addr + e820.map[i].size);
400 switch (e820.map[i].type) {
401 case E820_RAM: printk("(usable)\n");
402 break;
403 case E820_RESERVED:
404 printk("(reserved)\n");
405 break;
406 case E820_ACPI:
407 printk("(ACPI data)\n");
408 break;
409 case E820_NVS:
410 printk("(ACPI NVS)\n");
411 break;
412 default: printk("type %lu\n", e820.map[i].type);
413 break;
414 }
415 }
416}
417
418/*
419 * Sanitize the BIOS e820 map.
420 *
421 * Some e820 responses include overlapping entries. The following
422 * replaces the original e820 map with a new one, removing overlaps.
423 *
424 */
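/*
 * Worked example: two overlapping BIOS entries such as
 *
 *	0000000000000000 - 0000000008000000 (usable)
 *	0000000004000000 - 0000000006000000 (reserved)
 *
 * come out of sanitize_e820_map() as three non-overlapping entries
 *
 *	0000000000000000 - 0000000004000000 (usable)
 *	0000000004000000 - 0000000006000000 (reserved)
 *	0000000006000000 - 0000000008000000 (usable)
 *
 * because the higher-numbered (more restrictive) type wins wherever
 * entries overlap.
 */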
425struct change_member {
426 struct e820entry *pbios; /* pointer to original bios entry */
427 unsigned long long addr; /* address for this change point */
428};
429static struct change_member change_point_list[2*E820MAX] __initdata;
430static struct change_member *change_point[2*E820MAX] __initdata;
431static struct e820entry *overlap_list[E820MAX] __initdata;
432static struct e820entry new_bios[E820MAX] __initdata;
433
434static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
435{
436 struct change_member *change_tmp;
437 unsigned long current_type, last_type;
438 unsigned long long last_addr;
439 int chgidx, still_changing;
440 int overlap_entries;
441 int new_bios_entry;
442 int old_nr, new_nr, chg_nr;
443 int i;
444
445 /*
446 Visually we're performing the following (1,2,3,4 = memory types)...
447
448 Sample memory map (w/overlaps):
449 ____22__________________
450 ______________________4_
451 ____1111________________
452 _44_____________________
453 11111111________________
454 ____________________33__
455 ___________44___________
456 __________33333_________
457 ______________22________
458 ___________________2222_
459 _________111111111______
460 _____________________11_
461 _________________4______
462
463 Sanitized equivalent (no overlap):
464 1_______________________
465 _44_____________________
466 ___1____________________
467 ____22__________________
468 ______11________________
469 _________1______________
470 __________3_____________
471 ___________44___________
472 _____________33_________
473 _______________2________
474 ________________1_______
475 _________________4______
476 ___________________2____
477 ____________________33__
478 ______________________4_
479 */
480
481 /* if there's only one memory region, don't bother */
482 if (*pnr_map < 2)
483 return -1;
484
485 old_nr = *pnr_map;
486
487 /* bail out if we find any unreasonable addresses in bios map */
488 for (i=0; i<old_nr; i++)
489 if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
490 return -1;
491
492 /* create pointers for initial change-point information (for sorting) */
493 for (i=0; i < 2*old_nr; i++)
494 change_point[i] = &change_point_list[i];
495
496 /* record all known change-points (starting and ending addresses),
497 omitting those that are for empty memory regions */
498 chgidx = 0;
499 for (i=0; i < old_nr; i++) {
500 if (biosmap[i].size != 0) {
501 change_point[chgidx]->addr = biosmap[i].addr;
502 change_point[chgidx++]->pbios = &biosmap[i];
503 change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
504 change_point[chgidx++]->pbios = &biosmap[i];
505 }
506 }
507 chg_nr = chgidx; /* true number of change-points */
508
509 /* sort change-point list by memory addresses (low -> high) */
510 still_changing = 1;
511 while (still_changing) {
512 still_changing = 0;
513 for (i=1; i < chg_nr; i++) {
514 /* if <current_addr> > <last_addr>, swap */
515 /* or, if current=<start_addr> & last=<end_addr>, swap */
516 if ((change_point[i]->addr < change_point[i-1]->addr) ||
517 ((change_point[i]->addr == change_point[i-1]->addr) &&
518 (change_point[i]->addr == change_point[i]->pbios->addr) &&
519 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
520 )
521 {
522 change_tmp = change_point[i];
523 change_point[i] = change_point[i-1];
524 change_point[i-1] = change_tmp;
525 still_changing=1;
526 }
527 }
528 }
529
530 /* create a new bios memory map, removing overlaps */
531 overlap_entries=0; /* number of entries in the overlap table */
532 new_bios_entry=0; /* index for creating new bios map entries */
533 last_type = 0; /* start with undefined memory type */
534 last_addr = 0; /* start with 0 as last starting address */
535 /* loop through change-points, determining the effect on the new bios map */
536 for (chgidx=0; chgidx < chg_nr; chgidx++)
537 {
538 /* keep track of all overlapping bios entries */
539 if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
540 {
541 /* add map entry to overlap list (> 1 entry implies an overlap) */
542 overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
543 }
544 else
545 {
546 /* remove entry from list (order independent, so swap with last) */
547 for (i=0; i<overlap_entries; i++)
548 {
549 if (overlap_list[i] == change_point[chgidx]->pbios)
550 overlap_list[i] = overlap_list[overlap_entries-1];
551 }
552 overlap_entries--;
553 }
554 /* if there are overlapping entries, decide which "type" to use */
555 /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
556 current_type = 0;
557 for (i=0; i<overlap_entries; i++)
558 if (overlap_list[i]->type > current_type)
559 current_type = overlap_list[i]->type;
560 /* continue building up new bios map based on this information */
561 if (current_type != last_type) {
562 if (last_type != 0) {
563 new_bios[new_bios_entry].size =
564 change_point[chgidx]->addr - last_addr;
565 /* move forward only if the new size was non-zero */
566 if (new_bios[new_bios_entry].size != 0)
567 if (++new_bios_entry >= E820MAX)
568 break; /* no more space left for new bios entries */
569 }
570 if (current_type != 0) {
571 new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
572 new_bios[new_bios_entry].type = current_type;
573 last_addr=change_point[chgidx]->addr;
574 }
575 last_type = current_type;
576 }
577 }
578 new_nr = new_bios_entry; /* retain count for new bios entries */
579
580 /* copy new bios mapping into original location */
581 memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
582 *pnr_map = new_nr;
583
584 return 0;
585}
586
587/*
588 * Copy the BIOS e820 map into a safe place.
589 *
590 * Sanity-check it while we're at it..
591 *
592 * If we're lucky and live on a modern system, the setup code
593 * will have given us a memory map that we can use to properly
594 * set up memory. If we aren't, we'll fake a memory map.
595 *
596 * We check to see that the memory map contains at least 2 elements
597 * before we'll use it, because the detection code in setup.S may
598 * not be perfect and almost every PC known to man has two memory
599 * regions: one from 0 to 640K, and one from 1MB up. (The IBM
600 * ThinkPad 560x, for example, does not cooperate with the memory
601 * detection code.)
602 */
603static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
604{
605 /* Only one memory region (or negative)? Ignore it */
606 if (nr_map < 2)
607 return -1;
608
609 do {
610 unsigned long long start = biosmap->addr;
611 unsigned long long size = biosmap->size;
612 unsigned long long end = start + size;
613 unsigned long type = biosmap->type;
614
615 /* Overflow in 64 bits? Ignore the memory map. */
616 if (start > end)
617 return -1;
618
619 /*
620 * Some BIOSes claim RAM in the 640k - 1M region.
621 * Not right. Fix it up.
622 */
623 if (type == E820_RAM) {
624 if (start < 0x100000ULL && end > 0xA0000ULL) {
625 if (start < 0xA0000ULL)
626 add_memory_region(start, 0xA0000ULL-start, type);
627 if (end <= 0x100000ULL)
628 continue;
629 start = 0x100000ULL;
630 size = end - start;
631 }
632 }
633 add_memory_region(start, size, type);
634 } while (biosmap++,--nr_map);
635 return 0;
636}
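/*
 * Example of the 640K-1M fixup above: a BIOS entry claiming usable RAM from
 * 0x0 to 0x8000000 (128MB) is split into 0x0-0xA0000 plus 0x100000-0x8000000,
 * dropping the 0xA0000-0x100000 VGA/BIOS hole, while an entry covering only
 * 0x0-0x100000 is simply truncated to 0x0-0xA0000.
 */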
637
638#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
639struct edd edd;
640#ifdef CONFIG_EDD_MODULE
641EXPORT_SYMBOL(edd);
642#endif
643/**
644 * copy_edd() - Copy the BIOS EDD information
645 * from boot_params into a safe place.
646 *
647 */
648static inline void copy_edd(void)
649{
650 memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
651 memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
652 edd.mbr_signature_nr = EDD_MBR_SIG_NR;
653 edd.edd_info_nr = EDD_NR;
654}
655#else
656static inline void copy_edd(void)
657{
658}
659#endif
660
661/*
662 * Do NOT EVER look at the BIOS memory size location.
663 * It does not work on many machines.
664 */
665#define LOWMEMSIZE() (0x9f000)
666
667static void __init parse_cmdline_early (char ** cmdline_p)
668{
669 char c = ' ', *to = command_line, *from = saved_command_line;
670 int len = 0;
671 int userdef = 0;
672
673 /* Save unparsed command line copy for /proc/cmdline */
674 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
675
676 for (;;) {
677 if (c != ' ')
678 goto next_char;
679 /*
680 * "mem=nopentium" disables the 4MB page tables.
681 * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
682 * to <mem>, overriding the bios size.
683 * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
684 * <start> to <start>+<mem>, overriding the bios size.
685 *
686 * HPA tells me bootloaders need to parse mem=, so no new
687 * option should be mem= [also see Documentation/i386/boot.txt]
688 */
689 if (!memcmp(from, "mem=", 4)) {
690 if (to != command_line)
691 to--;
692 if (!memcmp(from+4, "nopentium", 9)) {
693 from += 9+4;
694 clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
695 disable_pse = 1;
696 } else {
697 /* If the user specifies memory size, we
698 * limit the BIOS-provided memory map to
699 * that size. exactmap can be used to specify
700 * the exact map. mem=number can be used to
701 * trim the existing memory map.
702 */
703 unsigned long long mem_size;
704
705 mem_size = memparse(from+4, &from);
706 limit_regions(mem_size);
707 userdef=1;
708 }
709 }
710
711 else if (!memcmp(from, "memmap=", 7)) {
712 if (to != command_line)
713 to--;
714 if (!memcmp(from+7, "exactmap", 8)) {
715 from += 8+7;
716 e820.nr_map = 0;
717 userdef = 1;
718 } else {
719 /* If the user specifies memory size, we
720 * limit the BIOS-provided memory map to
721 * that size. exactmap can be used to specify
722 * the exact map. mem=number can be used to
723 * trim the existing memory map.
724 */
725 unsigned long long start_at, mem_size;
726
727 mem_size = memparse(from+7, &from);
728 if (*from == '@') {
729 start_at = memparse(from+1, &from);
730 add_memory_region(start_at, mem_size, E820_RAM);
731 } else if (*from == '#') {
732 start_at = memparse(from+1, &from);
733 add_memory_region(start_at, mem_size, E820_ACPI);
734 } else if (*from == '$') {
735 start_at = memparse(from+1, &from);
736 add_memory_region(start_at, mem_size, E820_RESERVED);
737 } else {
738 limit_regions(mem_size);
739 userdef=1;
740 }
741 }
742 }
743
744 else if (!memcmp(from, "noexec=", 7))
745 noexec_setup(from + 7);
746
747
748#ifdef CONFIG_X86_SMP
749 /*
750 * If the BIOS enumerates physical processors before logical,
751 * maxcpus=N at enumeration-time can be used to disable HT.
752 */
753 else if (!memcmp(from, "maxcpus=", 8)) {
754 extern unsigned int maxcpus;
755
756 maxcpus = simple_strtoul(from + 8, NULL, 0);
757 }
758#endif
759
760#ifdef CONFIG_ACPI_BOOT
761 /* "acpi=off" disables both ACPI table parsing and interpreter */
762 else if (!memcmp(from, "acpi=off", 8)) {
763 disable_acpi();
764 }
765
766 /* acpi=force to override the blacklist */
767 else if (!memcmp(from, "acpi=force", 10)) {
768 acpi_force = 1;
769 acpi_ht = 1;
770 acpi_disabled = 0;
771 }
772
773 /* acpi=strict disables out-of-spec workarounds */
774 else if (!memcmp(from, "acpi=strict", 11)) {
775 acpi_strict = 1;
776 }
777
778 /* Limit ACPI just to boot-time to enable HT */
779 else if (!memcmp(from, "acpi=ht", 7)) {
780 if (!acpi_force)
781 disable_acpi();
782 acpi_ht = 1;
783 }
784
785 /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
786 else if (!memcmp(from, "pci=noacpi", 10)) {
787 acpi_disable_pci();
788 }
789 /* "acpi=noirq" disables ACPI interrupt routing */
790 else if (!memcmp(from, "acpi=noirq", 10)) {
791 acpi_noirq_set();
792 }
793
794 else if (!memcmp(from, "acpi_sci=edge", 13))
795 acpi_sci_flags.trigger = 1;
796
797 else if (!memcmp(from, "acpi_sci=level", 14))
798 acpi_sci_flags.trigger = 3;
799
800 else if (!memcmp(from, "acpi_sci=high", 13))
801 acpi_sci_flags.polarity = 1;
802
803 else if (!memcmp(from, "acpi_sci=low", 12))
804 acpi_sci_flags.polarity = 3;
805
806#ifdef CONFIG_X86_IO_APIC
807 else if (!memcmp(from, "acpi_skip_timer_override", 24))
808 acpi_skip_timer_override = 1;
809#endif
810
811#ifdef CONFIG_X86_LOCAL_APIC
812 /* disable IO-APIC */
813 else if (!memcmp(from, "noapic", 6))
814 disable_ioapic_setup();
815#endif /* CONFIG_X86_LOCAL_APIC */
816#endif /* CONFIG_ACPI_BOOT */
817
818 /*
819 * highmem=size forces highmem to be exactly 'size' bytes.
820 * This works even on boxes that have no highmem otherwise.
821 * This also works to reduce highmem size on bigger boxes.
822 */
823 else if (!memcmp(from, "highmem=", 8))
824 highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
825
826 /*
827 * vmalloc=size forces the vmalloc area to be exactly 'size'
828 * bytes. This can be used to increase (or decrease) the
829 * vmalloc area - the default is 128m.
830 */
831 else if (!memcmp(from, "vmalloc=", 8))
832 __VMALLOC_RESERVE = memparse(from+8, &from);
833
834 next_char:
835 c = *(from++);
836 if (!c)
837 break;
838 if (COMMAND_LINE_SIZE <= ++len)
839 break;
840 *(to++) = c;
841 }
842 *to = '\0';
843 *cmdline_p = command_line;
844 if (userdef) {
845 printk(KERN_INFO "user-defined physical RAM map:\n");
846 print_memory_map("user");
847 }
848}
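/*
 * Illustrative command lines handled by parse_cmdline_early() above:
 *
 *	mem=512M                        trim the BIOS memory map to the first 512MB
 *	mem=nopentium                   keep the map but disable 4MB page tables
 *	memmap=exactmap memmap=640K@0 memmap=255M@1M
 *	                                discard the BIOS map and describe RAM by hand
 *	memmap=64M#0x10000000           mark a range as ACPI data
 *	memmap=64M$0x10000000           mark a range as reserved
 *	highmem=512M vmalloc=192M       force the highmem and vmalloc sizes
 */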
849
850/*
851 * Callback for efi_memory_walk.
852 */
853static int __init
854efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
855{
856 unsigned long *max_pfn = arg, pfn;
857
858 if (start < end) {
859 pfn = PFN_UP(end -1);
860 if (pfn > *max_pfn)
861 *max_pfn = pfn;
862 }
863 return 0;
864}
865
866
867/*
868 * Find the highest page frame number we have available
869 */
870void __init find_max_pfn(void)
871{
872 int i;
873
874 max_pfn = 0;
875 if (efi_enabled) {
876 efi_memmap_walk(efi_find_max_pfn, &max_pfn);
877 return;
878 }
879
880 for (i = 0; i < e820.nr_map; i++) {
881 unsigned long start, end;
882 /* RAM? */
883 if (e820.map[i].type != E820_RAM)
884 continue;
885 start = PFN_UP(e820.map[i].addr);
886 end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
887 if (start >= end)
888 continue;
889 if (end > max_pfn)
890 max_pfn = end;
891 }
892}
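/*
 * The PFN helpers used throughout this file are, as conventionally defined
 * for i386:
 *
 *	PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)	round up to a frame
 *	PFN_DOWN(x)	((x) >> PAGE_SHIFT)			round down to a frame
 *	PFN_PHYS(x)	((x) << PAGE_SHIFT)			frame back to bytes
 *
 * so partial pages at the start of a region are skipped and partial pages
 * at the end are dropped.
 */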
893
894/*
895 * Determine low and high memory ranges:
896 */
897unsigned long __init find_max_low_pfn(void)
898{
899 unsigned long max_low_pfn;
900
901 max_low_pfn = max_pfn;
902 if (max_low_pfn > MAXMEM_PFN) {
903 if (highmem_pages == -1)
904 highmem_pages = max_pfn - MAXMEM_PFN;
905 if (highmem_pages + MAXMEM_PFN < max_pfn)
906 max_pfn = MAXMEM_PFN + highmem_pages;
907 if (highmem_pages + MAXMEM_PFN > max_pfn) {
908 printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
909 highmem_pages = 0;
910 }
911 max_low_pfn = MAXMEM_PFN;
912#ifndef CONFIG_HIGHMEM
913 /* Maximum memory usable is what is directly addressable */
914 printk(KERN_WARNING "Warning only %ldMB will be used.\n",
915 MAXMEM>>20);
916 if (max_pfn > MAX_NONPAE_PFN)
917 printk(KERN_WARNING "Use a PAE enabled kernel.\n");
918 else
919 printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
920 max_pfn = MAXMEM_PFN;
921#else /* !CONFIG_HIGHMEM */
922#ifndef CONFIG_X86_PAE
923 if (max_pfn > MAX_NONPAE_PFN) {
924 max_pfn = MAX_NONPAE_PFN;
925 printk(KERN_WARNING "Warning only 4GB will be used.\n");
926 printk(KERN_WARNING "Use a PAE enabled kernel.\n");
927 }
928#endif /* !CONFIG_X86_PAE */
929#endif /* !CONFIG_HIGHMEM */
930 } else {
931 if (highmem_pages == -1)
932 highmem_pages = 0;
933#ifdef CONFIG_HIGHMEM
934 if (highmem_pages >= max_pfn) {
935 printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
936 highmem_pages = 0;
937 }
938 if (highmem_pages) {
939 if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
940 printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
941 highmem_pages = 0;
942 }
943 max_low_pfn -= highmem_pages;
944 }
945#else
946 if (highmem_pages)
947 printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
948#endif
949 }
950 return max_low_pfn;
951}
952
953/*
954 * Free all available memory for boot time allocation. Used
955 * as a callback function by efi_memory_walk()
956 */
957
958static int __init
959free_available_memory(unsigned long start, unsigned long end, void *arg)
960{
961 /* check max_low_pfn */
962 if (start >= ((max_low_pfn + 1) << PAGE_SHIFT))
963 return 0;
964 if (end >= ((max_low_pfn + 1) << PAGE_SHIFT))
965 end = (max_low_pfn + 1) << PAGE_SHIFT;
966 if (start < end)
967 free_bootmem(start, end - start);
968
969 return 0;
970}
971/*
972 * Register fully available low RAM pages with the bootmem allocator.
973 */
974static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
975{
976 int i;
977
978 if (efi_enabled) {
979 efi_memmap_walk(free_available_memory, NULL);
980 return;
981 }
982 for (i = 0; i < e820.nr_map; i++) {
983 unsigned long curr_pfn, last_pfn, size;
984 /*
985 * Reserve usable low memory
986 */
987 if (e820.map[i].type != E820_RAM)
988 continue;
989 /*
990 * We are rounding up the start address of usable memory:
991 */
992 curr_pfn = PFN_UP(e820.map[i].addr);
993 if (curr_pfn >= max_low_pfn)
994 continue;
995 /*
996 * ... and at the end of the usable range downwards:
997 */
998 last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
999
1000 if (last_pfn > max_low_pfn)
1001 last_pfn = max_low_pfn;
1002
1003 /*
1004 * .. finally, did all the rounding and playing
1005 * around just make the area go away?
1006 */
1007 if (last_pfn <= curr_pfn)
1008 continue;
1009
1010 size = last_pfn - curr_pfn;
1011 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
1012 }
1013}
1014
1015/*
1016 * workaround for Dell systems that neglect to reserve EBDA
1017 */
1018static void __init reserve_ebda_region(void)
1019{
1020 unsigned int addr;
1021 addr = get_bios_ebda();
1022 if (addr)
1023 reserve_bootmem(addr, PAGE_SIZE);
1024}
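/*
 * get_bios_ebda() (from <bios_ebda.h>) conventionally reads the 16-bit
 * segment value stored in the BIOS data area at 0x40E and shifts it left by
 * four to get the EBDA's physical address; reserving one page there keeps
 * the bootmem allocator away from it.
 */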
1025
05b79bdc 1026#ifndef CONFIG_NEED_MULTIPLE_NODES
1027void __init setup_bootmem_allocator(void);
1028static unsigned long __init setup_memory(void)
1029{
1030 /*
1031 * partially used pages are not usable - thus
1032 * we are rounding upwards:
1033 */
1034 min_low_pfn = PFN_UP(init_pg_tables_end);
1035
1036 find_max_pfn();
1037
1038 max_low_pfn = find_max_low_pfn();
1039
1040#ifdef CONFIG_HIGHMEM
1041 highstart_pfn = highend_pfn = max_pfn;
1042 if (max_pfn > max_low_pfn) {
1043 highstart_pfn = max_low_pfn;
1044 }
1045 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
1046 pages_to_mb(highend_pfn - highstart_pfn));
1047#endif
1048 printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
1049 pages_to_mb(max_low_pfn));
1050
1051 setup_bootmem_allocator();
1052
1053 return max_low_pfn;
1054}
1055
1056void __init zone_sizes_init(void)
1057{
1058 unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
1059 unsigned int max_dma, low;
1060
1061 max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
1062 low = max_low_pfn;
1063
1064 if (low < max_dma)
1065 zones_size[ZONE_DMA] = low;
1066 else {
1067 zones_size[ZONE_DMA] = max_dma;
1068 zones_size[ZONE_NORMAL] = low - max_dma;
1069#ifdef CONFIG_HIGHMEM
1070 zones_size[ZONE_HIGHMEM] = highend_pfn - low;
1071#endif
1072 }
1073 free_area_init(zones_size);
1074}
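/*
 * With the usual i386 constants this gives the classic zone split:
 * ZONE_DMA covers page frames below MAX_DMA_ADDRESS (the ISA 16MB limit),
 * ZONE_NORMAL runs from there up to max_low_pfn (at most roughly 896MB with
 * the default 3G/1G split and 128MB vmalloc reserve), and anything above
 * that becomes ZONE_HIGHMEM when CONFIG_HIGHMEM is enabled.
 */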
1075#else
05b79bdc 1076extern unsigned long __init setup_memory(void);
1da177e4 1077extern void zone_sizes_init(void);
05b79bdc 1078#endif /* !CONFIG_NEED_MULTIPLE_NODES */
1079
1080void __init setup_bootmem_allocator(void)
1081{
1082 unsigned long bootmap_size;
1083 /*
1084 * Initialize the boot-time allocator (with low memory only):
1085 */
1086 bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
1087
1088 register_bootmem_low_pages(max_low_pfn);
1089
1090 /*
1091 * Reserve the bootmem bitmap itself as well. We do this in two
1092 * steps (first step was init_bootmem()) because this catches
1093 * the (very unlikely) case of us accidentally initializing the
1094 * bootmem allocator with an invalid RAM area.
1095 */
1096 reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(min_low_pfn) +
1097 bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
1098
1099 /*
1100 * reserve physical page 0 - it's a special BIOS page on many boxes,
1101 * enabling clean reboots, SMP operation, laptop functions.
1102 */
1103 reserve_bootmem(0, PAGE_SIZE);
1104
1105 /* reserve EBDA region, it's a 4K region */
1106 reserve_ebda_region();
1107
1108 /* This could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
1109 PCI prefetch into it (erratum #56). Usually the page is reserved anyway,
1110 unless you have no PS/2 mouse plugged in. */
1111 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
1112 boot_cpu_data.x86 == 6)
1113 reserve_bootmem(0xa0000 - 4096, 4096);
1114
1115#ifdef CONFIG_SMP
1116 /*
1117 * But first pinch a few for the stack/trampoline stuff
1118 * FIXME: Don't need the extra page at 4K, but need to fix
1119 * trampoline before removing it. (see the GDT stuff)
1120 */
1121 reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
1122#endif
1123#ifdef CONFIG_ACPI_SLEEP
1124 /*
1125 * Reserve low memory region for sleep support.
1126 */
1127 acpi_reserve_bootmem();
1128#endif
1129#ifdef CONFIG_X86_FIND_SMP_CONFIG
1130 /*
1131 * Find and reserve possible boot-time SMP configuration:
1132 */
1133 find_smp_config();
1134#endif
1135
1136#ifdef CONFIG_BLK_DEV_INITRD
1137 if (LOADER_TYPE && INITRD_START) {
1138 if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
1139 reserve_bootmem(INITRD_START, INITRD_SIZE);
1140 initrd_start =
1141 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
1142 initrd_end = initrd_start+INITRD_SIZE;
1143 }
1144 else {
1145 printk(KERN_ERR "initrd extends beyond end of memory "
1146 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
1147 INITRD_START + INITRD_SIZE,
1148 max_low_pfn << PAGE_SHIFT);
1149 initrd_start = 0;
1150 }
1151 }
1152#endif
1153}
1154
1155/*
1156 * The node 0 pgdat is initialized before all of these because
1157 * it's needed for bootmem. node>0 pgdats have their virtual
1158 * space allocated before the pagetables are in place to access
1159 * them, so they can't be cleared then.
1160 *
1161 * This should all compile down to nothing when NUMA is off.
1162 */
1163void __init remapped_pgdat_init(void)
1164{
1165 int nid;
1166
1167 for_each_online_node(nid) {
1168 if (nid != 0)
1169 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
1170 }
1171}
1172
1173/*
1174 * Request address space for all standard RAM and ROM resources
1175 * and also for regions reported as reserved by the e820.
1176 */
1177static void __init
1178legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
1179{
1180 int i;
1181
1182 probe_roms();
1183 for (i = 0; i < e820.nr_map; i++) {
1184 struct resource *res;
1185 if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
1186 continue;
1187 res = alloc_bootmem_low(sizeof(struct resource));
1188 switch (e820.map[i].type) {
1189 case E820_RAM: res->name = "System RAM"; break;
1190 case E820_ACPI: res->name = "ACPI Tables"; break;
1191 case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
1192 default: res->name = "reserved";
1193 }
1194 res->start = e820.map[i].addr;
1195 res->end = res->start + e820.map[i].size - 1;
1196 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
1197 request_resource(&iomem_resource, res);
1198 if (e820.map[i].type == E820_RAM) {
1199 /*
1200 * We don't know which RAM region contains kernel data,
1201 * so we try it repeatedly and let the resource manager
1202 * test it.
1203 */
1204 request_resource(res, code_resource);
1205 request_resource(res, data_resource);
1206 }
1207 }
1208}
1209
1210/*
1211 * Request address space for all standard resources
1212 */
1213static void __init register_memory(void)
1214{
1215 unsigned long gapstart, gapsize;
1216 unsigned long long last;
1217 int i;
1218
1219 if (efi_enabled)
1220 efi_initialize_iomem_resources(&code_resource, &data_resource);
1221 else
1222 legacy_init_iomem_resources(&code_resource, &data_resource);
1223
1224 /* EFI systems may still have VGA */
1225 request_resource(&iomem_resource, &video_ram_resource);
1226
1227 /* request I/O space for devices used on all i[345]86 PCs */
1228 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
1229 request_resource(&ioport_resource, &standard_io_resources[i]);
1230
1231 /*
1232 * Search for the biggest gap in the low 32 bits of the e820
1233 * memory space.
1234 */
1235 last = 0x100000000ull;
1236 gapstart = 0x10000000;
1237 gapsize = 0x400000;
1238 i = e820.nr_map;
1239 while (--i >= 0) {
1240 unsigned long long start = e820.map[i].addr;
1241 unsigned long long end = start + e820.map[i].size;
1242
1243 /*
1244 * Since "last" is at most 4GB, we know we'll
1245 * fit in 32 bits if this condition is true
1246 */
1247 if (last > end) {
1248 unsigned long gap = last - end;
1249
1250 if (gap > gapsize) {
1251 gapsize = gap;
1252 gapstart = end;
1253 }
1254 }
1255 if (start < last)
1256 last = start;
1257 }
1258
1259 /*
1260 * Start allocating dynamic PCI memory a bit into the gap,
1261 * aligned up to the nearest megabyte.
1262 *
1263 * Question: should we try to pad it up a bit (do something
1264 * like " + (gapsize >> 3)" in there too?). We now have the
1265 * technology.
1266 */
1267 pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
1268
1269 printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
1270 pci_mem_start, gapstart, gapsize);
1271}
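/*
 * Gap-search example: with usable RAM ending at 0x7fff0000 and the next
 * e820 entry starting at 0xfec00000, the loop above picks that hole as the
 * biggest sub-4GB gap and pci_mem_start is rounded up to 0x80000000.
 */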
1272
1273/* Use inline assembly to define this because the nops are defined
1274 as inline assembly strings in the include files and we cannot
1275 get them easily into strings. */
1276asm("\t.data\nintelnops: "
1277 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
1278 GENERIC_NOP7 GENERIC_NOP8);
1279asm("\t.data\nk8nops: "
1280 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
1281 K8_NOP7 K8_NOP8);
1282asm("\t.data\nk7nops: "
1283 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
1284 K7_NOP7 K7_NOP8);
1285
1286extern unsigned char intelnops[], k8nops[], k7nops[];
1287static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
1288 NULL,
1289 intelnops,
1290 intelnops + 1,
1291 intelnops + 1 + 2,
1292 intelnops + 1 + 2 + 3,
1293 intelnops + 1 + 2 + 3 + 4,
1294 intelnops + 1 + 2 + 3 + 4 + 5,
1295 intelnops + 1 + 2 + 3 + 4 + 5 + 6,
1296 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
1297};
1298static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
1299 NULL,
1300 k8nops,
1301 k8nops + 1,
1302 k8nops + 1 + 2,
1303 k8nops + 1 + 2 + 3,
1304 k8nops + 1 + 2 + 3 + 4,
1305 k8nops + 1 + 2 + 3 + 4 + 5,
1306 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
1307 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
1308};
1309static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
1310 NULL,
1311 k7nops,
1312 k7nops + 1,
1313 k7nops + 1 + 2,
1314 k7nops + 1 + 2 + 3,
1315 k7nops + 1 + 2 + 3 + 4,
1316 k7nops + 1 + 2 + 3 + 4 + 5,
1317 k7nops + 1 + 2 + 3 + 4 + 5 + 6,
1318 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
1319};
1320static struct nop {
1321 int cpuid;
1322 unsigned char **noptable;
1323} noptypes[] = {
1324 { X86_FEATURE_K8, k8_nops },
1325 { X86_FEATURE_K7, k7_nops },
1326 { -1, NULL }
1327};
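/*
 * Each record walked by apply_alternatives() below comes from the
 * .altinstructions section and carries, in the fields used here, the address
 * of the original instruction sequence (instr), the preferred replacement
 * (replacement), the X86_FEATURE_* bit that must be set for it to apply
 * (cpuid), and the two lengths (instrlen, replacementlen) with
 * replacementlen <= instrlen; leftover bytes are padded with nops from the
 * tables above.
 */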
1328
1329/* Replace instructions with better alternatives for this CPU type.
1330
1331 This runs before SMP is initialized to avoid SMP problems with
1332 self-modifying code. This implies that asymmetric systems where
1333 APs have fewer capabilities than the boot processor are not handled.
1334 In this case boot with "noreplacement". */
1335void apply_alternatives(void *start, void *end)
1336{
1337 struct alt_instr *a;
1338 int diff, i, k;
1339 unsigned char **noptable = intel_nops;
1340 for (i = 0; noptypes[i].cpuid >= 0; i++) {
1341 if (boot_cpu_has(noptypes[i].cpuid)) {
1342 noptable = noptypes[i].noptable;
1343 break;
1344 }
1345 }
1346 for (a = start; (void *)a < end; a++) {
1347 if (!boot_cpu_has(a->cpuid))
1348 continue;
1349 BUG_ON(a->replacementlen > a->instrlen);
1350 memcpy(a->instr, a->replacement, a->replacementlen);
1351 diff = a->instrlen - a->replacementlen;
1352 /* Pad the rest with nops */
1353 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
1354 k = diff;
1355 if (k > ASM_NOP_MAX)
1356 k = ASM_NOP_MAX;
1357 memcpy(a->instr + i, noptable[k], k);
1358 }
1359 }
1360}
1361
1362static int no_replacement __initdata = 0;
1363
1364void __init alternative_instructions(void)
1365{
1366 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
1367 if (no_replacement)
1368 return;
1369 apply_alternatives(__alt_instructions, __alt_instructions_end);
1370}
1371
1372static int __init noreplacement_setup(char *s)
1373{
1374 no_replacement = 1;
1375 return 0;
1376}
1377
1378__setup("noreplacement", noreplacement_setup);
1379
1380static char * __init machine_specific_memory_setup(void);
1381
1382#ifdef CONFIG_MCA
1383static void set_mca_bus(int x)
1384{
1385 MCA_bus = x;
1386}
1387#else
1388static void set_mca_bus(int x) { }
1389#endif
1390
1391/*
1392 * Determine if we were loaded by an EFI loader. If so, then we have also been
1393 * passed the efi memmap, systab, etc., so we should use these data structures
1394 * for initialization. Note, the efi init code path is determined by the
1395 * global efi_enabled. This allows the same kernel image to be used on existing
1396 * systems (with a traditional BIOS) as well as on EFI systems.
1397 */
1398void __init setup_arch(char **cmdline_p)
1399{
1400 unsigned long max_low_pfn;
1401
1402 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
1403 pre_setup_arch_hook();
1404 early_cpu_init();
1405
1406 /*
1407 * FIXME: This isn't an official loader_type right
1408 * now but does currently work with elilo.
1409 * If we were configured as an EFI kernel, check to make
1410 * sure that we were loaded correctly from elilo and that
1411 * the system table is valid. If not, then initialize normally.
1412 */
1413#ifdef CONFIG_EFI
1414 if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
1415 efi_enabled = 1;
1416#endif
1417
1418 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
1419 drive_info = DRIVE_INFO;
1420 screen_info = SCREEN_INFO;
1421 edid_info = EDID_INFO;
1422 apm_info.bios = APM_BIOS_INFO;
1423 ist_info = IST_INFO;
1424 saved_videomode = VIDEO_MODE;
1425 if( SYS_DESC_TABLE.length != 0 ) {
1426 set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
1427 machine_id = SYS_DESC_TABLE.table[0];
1428 machine_submodel_id = SYS_DESC_TABLE.table[1];
1429 BIOS_revision = SYS_DESC_TABLE.table[2];
1430 }
1431 bootloader_type = LOADER_TYPE;
1432
1433#ifdef CONFIG_BLK_DEV_RAM
1434 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
1435 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
1436 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
1437#endif
1438 ARCH_SETUP
1439 if (efi_enabled)
1440 efi_init();
1441 else {
1442 printk(KERN_INFO "BIOS-provided physical RAM map:\n");
1443 print_memory_map(machine_specific_memory_setup());
1444 }
1445
1446 copy_edd();
1447
1448 if (!MOUNT_ROOT_RDONLY)
1449 root_mountflags &= ~MS_RDONLY;
1450 init_mm.start_code = (unsigned long) _text;
1451 init_mm.end_code = (unsigned long) _etext;
1452 init_mm.end_data = (unsigned long) _edata;
1453 init_mm.brk = init_pg_tables_end + PAGE_OFFSET;
1454
1455 code_resource.start = virt_to_phys(_text);
1456 code_resource.end = virt_to_phys(_etext)-1;
1457 data_resource.start = virt_to_phys(_etext);
1458 data_resource.end = virt_to_phys(_edata)-1;
1459
1460 parse_cmdline_early(cmdline_p);
1461
1462 max_low_pfn = setup_memory();
1463
1464 /*
1465 * NOTE: before this point _nobody_ is allowed to allocate
1466 * any memory using the bootmem allocator. Although the
1467 * allocator is now initialised, only the first 8MB of the kernel
1468 * virtual address space has been mapped. All allocations made before
1469 * paging_init() completes must use the alloc_bootmem_low_pages()
1470 * variant (which allocates DMA'able memory), and care must be taken
1471 * not to exceed the 8MB limit.
1472 */
1473
1474#ifdef CONFIG_SMP
1475 smp_alloc_memory(); /* AP processor realmode stacks in low memory */
1476#endif
1477 paging_init();
1478 remapped_pgdat_init();
05b79bdc 1479 sparse_init();
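	/* With the sparsemem memory model this sets up the section tables
	   (mem_section[] and the per-section mem_map) before the zones are
	   sized; it compiles to a no-op when sparsemem is not configured. */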
1480 zone_sizes_init();
1481
1482 /*
1483 * NOTE: at this point the bootmem allocator is fully available.
1484 */
1485
1486#ifdef CONFIG_EARLY_PRINTK
1487 {
1488 char *s = strstr(*cmdline_p, "earlyprintk=");
1489 if (s) {
1490 extern void setup_early_printk(char *);
1491
1492 setup_early_printk(s);
1493 printk("early console enabled\n");
1494 }
1495 }
1496#endif
1497
1498
1499 dmi_scan_machine();
1500
1501#ifdef CONFIG_X86_GENERICARCH
1502 generic_apic_probe(*cmdline_p);
1503#endif
1504 if (efi_enabled)
1505 efi_map_memmap();
1506
adaa765d 1507#ifdef CONFIG_ACPI_BOOT
1508 /*
1509 * Parse the ACPI tables for possible boot-time SMP configuration.
1510 */
1511 acpi_boot_table_init();
1512 acpi_boot_init();
adaa765d 1513#endif
1514
1515#ifdef CONFIG_X86_LOCAL_APIC
1516 if (smp_found_config)
1517 get_smp_config();
1518#endif
1519
1520 register_memory();
1521
1522#ifdef CONFIG_VT
1523#if defined(CONFIG_VGA_CONSOLE)
1524 if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
1525 conswitchp = &vga_con;
1526#elif defined(CONFIG_DUMMY_CONSOLE)
1527 conswitchp = &dummy_con;
1528#endif
1529#endif
1530}
1531
1532#include "setup_arch_post.h"
1533/*
1534 * Local Variables:
1535 * mode:c
1536 * c-file-style:"k&r"
1537 * c-basic-offset:8
1538 * End:
1539 */