x86, realmode: Move kernel/realmode.c to realmode/init.c
arch/x86/realmode/init.c
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/realmode.h>

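/*
 * real_mode_header points at the header of the real-mode trampoline blob
 * once setup_real_mode() has copied it into low memory.
 * trampoline_cr4_features points into that header so later CR4 changes
 * can be mirrored into the value the trampoline loads on the APs.
 */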
struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;

void __init setup_real_mode(void)
{
	phys_addr_t mem;
	u16 real_mode_seg;
	u32 *rel;
	u32 count;
	u32 *ptr;
	u16 *seg;
	int i;
	unsigned char *base;
	struct trampoline_header *trampoline_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u32 efer_low, efer_high;
#endif

	/* Has to be in very low memory so we can execute real-mode AP code. */
	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
	if (!mem)
		panic("Cannot allocate trampoline\n");

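	/*
	 * Reserve the range and copy the pre-built real-mode blob
	 * (header, trampoline code and data) into it; the kernel
	 * accesses it through the direct mapping from here on.
	 */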
	base = __va(mem);
	memblock_reserve(mem, size);
	real_mode_header = (struct real_mode_header *) base;
	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
	       base, (unsigned long long)mem, size);

	memcpy(base, real_mode_blob, size);

	real_mode_seg = __pa(base) >> 4;
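	/*
	 * real_mode_relocs is two tables, each a 32-bit count followed
	 * by that many 32-bit offsets into the blob: first the 16-bit
	 * segment relocations, then the 32-bit linear relocations.
	 */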
	rel = (u32 *) real_mode_relocs;

	/* 16-bit segment relocations. */
	count = rel[0];
	rel = &rel[1];
	for (i = 0; i < count; i++) {
		seg = (u16 *) (base + rel[i]);
		*seg = real_mode_seg;
	}

	/* 32-bit linear relocations. */
	count = rel[i];
	rel = &rel[i + 1];
	for (i = 0; i < count; i++) {
		ptr = (u32 *) (base + rel[i]);
		*ptr += __pa(base);
	}

	/* Must be performed *after* relocation. */
	trampoline_header = (struct trampoline_header *)
		__va(real_mode_header->trampoline_header);

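	/*
	 * Fill in the fields the trampoline reads: the kernel entry
	 * point to jump to once the AP has left real mode and, on
	 * 32-bit, the boot GDT it loads on the way.
	 */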
#ifdef CONFIG_X86_32
	trampoline_header->start = __pa(startup_32_smp);
	trampoline_header->gdt_limit = __BOOT_DS + 7;
	trampoline_header->gdt_base = __pa(boot_gdt);
#else
	/*
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
	rdmsr(MSR_EFER, efer_low, efer_high);
	trampoline_header->efer_low = efer_low & ~EFER_LMA;
	trampoline_header->efer_high = efer_high;

	trampoline_header->start = (u64) secondary_startup_64;
	trampoline_cr4_features = &trampoline_header->cr4;
	*trampoline_cr4_features = read_cr4();

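	/*
	 * Minimal page table for the trampoline: entry 0 reuses the
	 * kernel's identity mapping of low memory, entry 511 the kernel
	 * mapping, so the AP can enable paging and long mode before
	 * switching to the real kernel page tables.
	 */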
	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
	trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
	trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
#endif
}

/*
 * setup_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Thus, we use an arch_initcall instead.
 */
static int __init set_real_mode_permissions(void)
{
	unsigned char *base = (unsigned char *) real_mode_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

	size_t ro_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		__pa(base);

	size_t text_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		real_mode_header->text_start;

	unsigned long text_start =
		(unsigned long) __va(real_mode_header->text_start);

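	/*
	 * Default the whole blob to non-executable, make everything up
	 * to ro_end read-only, then re-enable execute on the text region.
	 */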
	set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
	set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);

	return 0;
}

arch_initcall(set_real_mode_permissions);
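/*
 * For reference -- a minimal sketch, not part of this file: the SMP
 * bringup path consumes the area reserved above roughly like this,
 * using the trampoline_start field declared in <asm/realmode.h>:
 *
 *	unsigned long start_ip = real_mode_header->trampoline_start;
 *	(start_ip is then handed to the APIC as the AP startup vector)
 */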