x86: remove pointless comments
[linux-2.6-block.git] / arch/x86/mm/init_32.c

/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
		if (!page_table) {
			page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		}

		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

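/*
 * Used by the mapping code below to decide which pages must stay
 * executable: everything from PAGE_OFFSET up to __init_end is treated
 * as kernel text.
 */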
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0;
		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 *
			 * Don't use a large page for the first 2/4MB of memory
			 * because there are often fixed size MTRRs in there
			 * and overlapping MTRRs into large pages can cause
			 * slowdowns.
			 */
			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				max_pfn_mapped = pfn;
				continue;
			}
			pte = one_page_table_init(pmd);

			for (pte_ofs = 0;
			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				set_pte(pte, pfn_pte(pfn, prot));
			}
			max_pfn_mapped = pfn;
		}
	}
}

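/*
 * Physical pages in the range 0x70000000-0x7003ffff (pfns 0x70000-0x7003f)
 * are kept reserved when ppro_with_ram_bug() reports an affected
 * Pentium Pro; see add_one_highpage_init() below.
 */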
static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

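/*
 * Walk the kernel page tables (pgd -> pud -> pmd -> pte) and return the
 * pte that backs the given fixmap virtual address.
 */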
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

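/*
 * Allocate the page tables covering the permanent kmap window
 * (PKMAP_BASE .. PKMAP_BASE + LAST_PKMAP pages) and cache the first
 * pte in pkmap_page_table for use by kmap()/kunmap().
 */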
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

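/*
 * Hand a highmem page over to the page allocator and account for it
 * in totalhigh_pages.
 */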
static void __meminit free_new_highpage(struct page *page)
{
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}

static int __meminit
add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;

	return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assuming single node and all memory that
 * has been added dynamically that would be
 * onlined here is in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;

	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
		/*
		 * Holes under sparsemem might have no mem_map[]:
		 */
		if (pfn_valid(pfn))
			add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	}
	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
# define kmap_init()				do { } while (0)
# define permanent_kmaps_init(pgd_base)		do { } while (0)
# define set_highmem_pages_init(bad_ppro)	do { } while (0)
#endif /* CONFIG_HIGHMEM */

pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);

pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

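/*
 * Nothing to do here on native hardware; paravirt back-ends supply
 * their own pagetable_setup_done hook instead.
 */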
void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	early_ioremap_clear();
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);

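/*
 * Check CPUID extended function 0x80000001: EDX bit 20 advertises NX.
 * If it is present (and not disabled via "noexec=off"), turn on EFER.NX
 * and allow _PAGE_NX in __supported_pte_mask.
 */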
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif
	pagetable_init();

	load_cr3(swapper_pg_dir);

	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
	"Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
	"This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

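/*
 * Late boot memory setup: hand all low memory to the page allocator,
 * account reserved pages, register the /proc/kcore ranges and print the
 * virtual memory layout.
 */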
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp, bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR
			"fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
				FIXADDR_START);
		BUG();
	}
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#if 1 /* double-sanity-check paranoia */
	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START > VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);
#endif /* double-sanity-check paranoia */

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	cpa_init();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
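/*
 * Hot-added memory goes into ZONE_HIGHMEM of the given node; the pages
 * are later onlined through online_page() above.
 */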
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

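	/*
	 * Write back to the read-only FIX_WP_TEST page. If WP is honoured
	 * the write faults and the exception table entry skips the xorl,
	 * so flag stays 1; if the write silently succeeds, flag is cleared
	 * and we report a non-working WP bit.
	 */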
	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

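/*
 * Write-protect the kernel text and then the remaining read-only data
 * with set_pages_ro(); with CONFIG_CPA_DEBUG the protection is flipped
 * back and forth once to exercise the CPA code.
 */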
void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

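/*
 * Free the pages in [begin, end): poison them, give them back to the
 * page allocator and update totalram_pages. With CONFIG_DEBUG_PAGEALLOC
 * the range is unmapped instead, so stray init-section accesses fault.
 */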
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;

	/*
	 * We just marked the kernel text read only above, now that
	 * we are going to free part of that, we need to make that
	 * writeable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif