mm: Remove slab destructors from kmem_cache_create().
arch/i386/mm/init.c (linux-2.6-block.git)
/*
 * linux/arch/i386/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/paravirt.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

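/*
 * Note: __VMALLOC_RESERVE keeps 128MB of kernel virtual address space
 * free for vmalloc/ioremap; the amount of directly mapped low memory
 * is sized so that this window still fits below FIXADDR_TOP.
 */
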
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                if (pmd_table != pmd_offset(pud, 0))
                        BUG();
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);
        return pmd_table;
}
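
/*
 * With !CONFIG_X86_PAE the middle level is folded away: pud_offset()
 * and pmd_offset() with index 0 simply reinterpret the pgd entry, so
 * the function above hands back the pgd slot itself viewed as a pmd.
 */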

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        int pgd_idx, pmd_idx;
        unsigned long vaddr;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
                        one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}
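
/*
 * The walk above advances vaddr in PMD_SIZE steps and stops on
 * (vaddr != end), so callers must pass PMD-aligned boundaries;
 * pagetable_init() rounds its fixmap range with PMD_MASK for exactly
 * this reason.
 */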

static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int pgd_idx, pmd_idx, pte_ofs;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;

        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                if (pfn >= max_low_pfn)
                        continue;
                for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
                        unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /* Map with big pages if possible, otherwise create normal page tables. */
                        if (cpu_has_pse) {
                                unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(address) || is_kernel_text(address2))
                                        set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));

                                pfn += PTRS_PER_PTE;
                        } else {
                                pte = one_page_table_init(pmd);

                                for (pte_ofs = 0;
                                     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
                                     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
                                        if (is_kernel_text(address))
                                                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                                        else
                                                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
                                }
                        }
                }
        }
}

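/*
 * Worked example: without PAE, PTRS_PER_PTE is 1024, so one PSE
 * mapping covers 1024 * 4096 bytes = 4MB and pfn advances by 1024
 * per iteration; with PAE it is 512 entries of 2MB each. address2
 * points at the last byte of the candidate large page, so a mapping
 * that overlaps kernel text at either end stays executable.
 */
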
static inline int page_kills_ppro(unsigned long pagenr)
{
        if (pagenr >= 0x70000 && pagenr <= 0x7003F)
                return 1;
        return 0;
}

int page_is_ram(unsigned long pagenr)
{
        int i;
        unsigned long addr, end;

        if (efi_enabled) {
                efi_memory_desc_t *md;
                void *p;

                for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                        md = p;
                        if (!is_available_memory(md))
                                continue;
                        addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
                        end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

                        if ((pagenr >= addr) && (pagenr < end))
                                return 1;
                }
                return 0;
        }

        for (i = 0; i < e820.nr_map; i++) {

                if (e820.map[i].type != E820_RAM)       /* not usable memory */
                        continue;
                /*
                 * !!!FIXME!!! Some BIOSen report areas as RAM that
                 * are not. Notably the 640->1Mb area. We need a sanity
                 * check here.
                 */
                addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

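/*
 * pkmap_page_table caches the single pte page that backs the PKMAP
 * window (LAST_PKMAP entries starting at PKMAP_BASE); kmap_high()
 * installs highmem mappings by writing these ptes directly rather
 * than walking the page tables on every kmap().
 */
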
static void __meminit free_new_highpage(struct page *page)
{
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
        if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
                ClearPageReserved(page);
                free_new_highpage(page);
        } else
                SetPageReserved(page);
}

static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
        free_new_highpage(page);
        totalram_pages++;
#ifdef CONFIG_FLATMEM
        max_mapnr = max(pfn, max_mapnr);
#endif
        num_physpages++;
        return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assume a single node, and that any memory
 * hot-added and onlined here is HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
        ClearPageReserved(page);
        add_one_highpage_hotplug(page, page_to_pfn(page));
}


#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
        int pfn;
        for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
                add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
        totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

void __init native_pagetable_setup_start(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
        int i;

        /*
         * Init entries of the first-level page table to the
         * zero page, if they haven't already been set up.
         *
         * In a normal native boot, we'll be running on a
         * pagetable rooted in swapper_pg_dir, but not in PAE
         * mode, so this will end up clobbering the mappings
         * for the lower 24Mbytes of the address space,
         * without affecting the kernel address space.
         */
        for (i = 0; i < USER_PTRS_PER_PGD; i++)
                set_pgd(&base[i],
                        __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));

        /* Make sure kernel address space is empty so that a pagetable
           will be allocated for it. */
        memset(&base[USER_PTRS_PER_PGD], 0,
               KERNEL_PGD_PTRS * sizeof(pgd_t));
#else
        paravirt_alloc_pd(__pa(swapper_pg_dir) >> PAGE_SHIFT);
#endif
}

void __init native_pagetable_setup_done(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
        /*
         * Add low memory identity-mappings - SMP needs it when
         * starting up on an AP from real-mode. In the non-PAE
         * case we already have these mappings through head.S.
         * All user-space mappings are explicitly cleared after
         * SMP startup.
         */
        set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
#endif
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/i386/kernel/head.S, and not running in PAE mode
 * (even if we'll end up running in PAE). The root of the pagetable
 * will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
        unsigned long vaddr, end;
        pgd_t *pgd_base = swapper_pg_dir;

        paravirt_pagetable_setup_start(pgd_base);

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __PAGE_KERNEL |= _PAGE_GLOBAL;
                __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
        }

        kernel_physical_mapping_init(pgd_base);
        remap_numa_kva();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);

        permanent_kmaps_init(pgd_base);

        paravirt_pagetable_setup_done(pgd_base);
}

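/*
 * Note on PGE: once _PAGE_GLOBAL is set in __PAGE_KERNEL* above,
 * kernel TLB entries are tagged global and survive the implicit TLB
 * flush that a CR3 reload performs on every context switch.
 */
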
#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
/*
 * Swap suspend & friends need this for resume because things like the
 * intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings(void)
{
        int i;

        save_pg_dir();

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        flush_tlb_all();
}

static int disable_nx __initdata = 0;
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else if (!strcmp(str, "off")) {
                disable_nx = 1;
                __supported_pte_mask &= ~_PAGE_NX;
        } else
                return -EINVAL;

        return 0;
}
early_param("noexec", noexec_setup);

int nx_enabled = 0;
#ifdef CONFIG_X86_PAE

static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
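
/*
 * set_nx() probes CPUID leaf 0x80000001, where EDX bit 20 advertises
 * NX support. EFER is MSR 0xC0000080; setting EFER_NX there makes the
 * CPU honour bit 63 (the NX bit) of PAE page-table entries.
 */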

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
        pte_t *pte;
        int ret = 1;

        if (!nx_enabled)
                goto out;

        pte = lookup_address(vaddr);
        BUG_ON(!pte);

        if (!pte_exec_kernel(*pte))
                ret = 0;

        if (enable)
                pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
        else
                pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
        pte_update_defer(&init_mm, vaddr, pte);
        __flush_tlb_all();
out:
        return ret;
}

#endif
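
/*
 * _PAGE_BIT_NX is bit 63 of a PAE pte and pte_high holds bits 32-63,
 * which is why set_kernel_exec() toggles bit (_PAGE_BIT_NX - 32)
 * there. Non-PAE ptes are only 32 bits wide and have no NX bit, so
 * this whole block exists only under CONFIG_X86_PAE.
 */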

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk("NX (Execute Disable) protection: active\n");
#endif

        pagetable_init();

        load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
        /*
         * We will bail out later - printk doesn't work right now so
         * the user would just see a hanging kernel.
         */
        if (cpu_has_pae)
                set_in_cr4(X86_CR4_PAE);
#endif
        __flush_tlb_all();

        kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */

static void __init test_wp_bit(void)
{
        printk("Checking if this processor honours the WP bit even in supervisor mode... ");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk("Ok.\n");
        }
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        extern int ppro_with_ram_bug(void);
        int codesize, reservedpages, datasize, initsize;
        int tmp;
        int bad_ppro;

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif

        bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
                printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
                printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
                BUG();
        }
#endif

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init(bad_ppro);

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               num_physpages << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10,
               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

#if 1 /* double-sanity-check paranoia */
        printk("virtual kernel memory layout:\n"
               "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
               "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
               "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
               "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
               "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
               "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
               "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
               FIXADDR_START, FIXADDR_TOP,
               (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
               PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
               (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

               VMALLOC_START, VMALLOC_END,
               (VMALLOC_END - VMALLOC_START) >> 20,

               (unsigned long)__va(0), (unsigned long)high_memory,
               ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

               (unsigned long)&__init_begin, (unsigned long)&__init_end,
               ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,

               (unsigned long)&_etext, (unsigned long)&_edata,
               ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

               (unsigned long)&_text, (unsigned long)&_etext,
               ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START > VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);
#endif /* double-sanity-check paranoia */

#ifdef CONFIG_X86_PAE
        if (!cpu_has_pae)
                panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        /*
         * Subtle. SMP is doing its boot stuff late (because it has to
         * fork idle threads) - but it also needs low mappings for the
         * protected-mode entry to work. We zap these entries only after
         * the WP-bit has been tested.
         */
#ifndef CONFIG_SMP
        zap_low_mappings();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif

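/*
 * __add_pages() only creates the memmap and zone structures for the
 * new range; the pages become usable once they are onlined, at which
 * point online_page() above hands each of them to the highmem free
 * path.
 */
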
struct kmem_cache *pmd_cache;

void __init pgtable_cache_init(void)
{
        size_t pgd_size = PTRS_PER_PGD*sizeof(pgd_t);

        if (PTRS_PER_PMD > 1) {
                pmd_cache = kmem_cache_create("pmd",
                                              PTRS_PER_PMD*sizeof(pmd_t),
                                              PTRS_PER_PMD*sizeof(pmd_t),
                                              SLAB_PANIC,
                                              pmd_ctor);
                if (!SHARED_KERNEL_PMD) {
                        /* If we're in PAE mode and have a non-shared
                           kernel pmd, then the pgd size must be a
                           page size. This is because the pgd_list
                           links through the page structure, so there
                           can only be one pgd per page for this to
                           work. */
                        pgd_size = PAGE_SIZE;
                }
        }
}
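
/*
 * Per the commit this listing belongs to, kmem_cache_create() no
 * longer takes a destructor: the call above passes only the name,
 * size, alignment, flags and the pmd_ctor() constructor.
 */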

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0,%1      \n"
                "1:     movb %1,%0      \n"
                "       xorl %2,%2      \n"
                "2:                     \n"
                ".section __ex_table,\"a\"\n"
                "       .align 4        \n"
                "       .long 1b,2b     \n"
                ".previous              \n"
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}
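
/*
 * The asm above leans on the exception-table fixup machinery: the
 * store at label 1 targets the read-only FIX_WP_TEST page, so when
 * the CPU honours WP in supervisor mode it faults and the fixup
 * entry (.long 1b,2b) resumes at label 2 with flag still 1. Only if
 * the write silently succeeds does the xorl clear flag to 0.
 */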

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_KPROBES
#ifdef CONFIG_HOTPLUG_CPU
        /* It must still be possible to apply SMP alternatives. */
        if (num_possible_cpus() <= 1)
#endif
        {
                change_page_attr(virt_to_page(start),
                                 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
                printk("Write protecting the kernel text: %luk\n", size >> 10);
        }
#endif
        start += size;
        size = (unsigned long)__end_rodata - start;
        change_page_attr(virt_to_page(start),
                         size >> PAGE_SHIFT, PAGE_KERNEL_RO);
        printk("Write protecting the kernel read-only data: %luk\n",
               size >> 10);

        /*
         * change_page_attr() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr;

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}
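
/*
 * POISON_FREE_INITMEM (0xcc, from <linux/poison.h>) fills the freed
 * pages so that stray late references to __init code or data show a
 * recognizable pattern; 0xcc is also the x86 int3 opcode, so jumping
 * into freed init text traps immediately.
 */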

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif