/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%ld pages of RAM\n", total);
	printk(KERN_INFO "%ld reserved pages\n", reserved);
	printk(KERN_INFO "%ld pages shared\n", shared);
	printk(KERN_INFO "%ld pages swap cached\n", cached);
}

/* References to section boundaries */

int after_bootmem;

static __init void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n",
		      after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

static __init void set_pte_phys(unsigned long vaddr,
				unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

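/*
 * Illustrative use (hypothetical caller; the actual callers live
 * elsewhere, e.g. the APIC setup code): establishing one fixed-address
 * kernel mapping is simply
 *
 *	__set_fixmap(FIX_APIC_BASE, mp_lapic_addr, PAGE_KERNEL_NOCACHE);
 *
 * with the index taken from enum fixed_addresses in asm/fixmap.h.
 */
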
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void *address;
	int allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	memset(adr, 0, PAGE_SIZE);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(int i)
{
	struct temp_map *ti;

	if (after_bootmem)
		return;

	ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

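/*
 * Usage sketch for the two helpers above (illustrative; the surrounding
 * code is made up): before bootmem is running, a fresh page-table page
 * is taken from pfn table_end++ and reached through one of the
 * temp_boot_pmds windows:
 *
 *	int map;
 *	unsigned long phys;
 *	pmd_t *pmd = alloc_low_page(&map, &phys);
 *	(fill in the new page-table page through pmd)
 *	unmap_low_page(map);
 *
 * Once after_bootmem is set, alloc_low_page() degenerates to
 * get_zeroed_page(GFP_ATOMIC) and unmap_low_page() is a no-op.
 */
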
/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

	/* actually maps somewhat more than requested */
	if (size >= LARGE_PAGE_SIZE) {
		printk("SMBIOS area too long %lu\n", size);
		return NULL;
	}
	set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	map += LARGE_PAGE_SIZE;
	set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	__flush_tlb();
	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
		printk("early_iounmap: bad address %p\n", addr);
	set_pmd(temp_mappings[0].pmd, __pmd(0));
	set_pmd(temp_mappings[1].pmd, __pmd(0));
	__flush_tlb();
}

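/*
 * Illustrative use of the pair above (hypothetical values; the real
 * caller is the early SMBIOS/DMI scan): map the BIOS area, look at it,
 * and tear the alias down again before zap_low_mappings():
 *
 *	void *p = early_ioremap(0xf0000, 0x10000);
 *	(scan p for the SMBIOS entry point)
 *	early_iounmap(p, 0x10000);
 *
 * Two 2MB windows are mapped so that a region of up to LARGE_PAGE_SIZE
 * may straddle a 2MB boundary and still be covered.
 */
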
static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
		unsigned long entry;

		if (address > end) {
			for (; i < PTRS_PER_PMD; i++, pmd++)
				set_pmd(pmd, __pmd(0));
			break;
		}
		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

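/*
 * Example of the entries written above (value illustrative): for
 * address = 0x40000000 the pmd entry becomes
 * 0x40000000 | _PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | _PAGE_NX,
 * masked by __supported_pte_mask so NX is dropped on CPUs without it:
 * one global 2MB kernel mapping with no pte level underneath.
 */
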
static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));

	if (pmd_none(*pmd)) {
		spin_lock(&init_mm.page_table_lock);
		phys_pmd_init(pmd, address, end);
		spin_unlock(&init_mm.page_table_lock);
		__flush_tlb_all();
	}
}

static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i = pud_index(address);

	pud = pud + i;

	if (after_bootmem && pud_val(*pud)) {
		phys_pmd_update(pud, address, end);
		return;
	}

	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
		if (paddr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, paddr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(map);
	}
	__flush_tlb();
}

static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN: putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		     end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
}

/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map = 0;	/* only set and used on the !after_bootmem path */
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&map, &pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}

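/*
 * Worked example of the table sizing (numbers illustrative): with 4KB
 * pages a pgd entry spans PGDIR_SIZE = 512GB, a pud entry PUD_SIZE = 1GB
 * and a pmd entry PMD_SIZE = 2MB. Mapping 4GB of RAM therefore takes one
 * pgd entry, one pud page (4 entries used) and 4 pmd pages of 512 PSE
 * entries each, which is what find_early_table_space() reserves:
 * round_up(4 * 8, 4096) + round_up(2048 * 8, 4096) = 5 pages.
 */
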
void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For APs, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing a local flush tlb all.
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}

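/*
 * The low identity mapping is only needed to bring CPUs from real mode
 * into long mode. The BP clears pgd entry 0 of its live page table
 * directly; an AP merely switches cr3 to init_level4_pgt, whose low
 * identity mappings are already zapped, so both paths end with nothing
 * mapped at virtual address 0.
 */
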
/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
	   unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	unsigned long w;

	for (i = 0; i < MAX_NR_ZONES; i++)
		z[i] = 0;

	if (start_pfn < MAX_DMA_PFN)
		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
	if (start_pfn < MAX_DMA32_PFN) {
		unsigned long dma32_pfn = MAX_DMA32_PFN;
		if (dma32_pfn > end_pfn)
			dma32_pfn = end_pfn;
		z[ZONE_DMA32] = dma32_pfn - start_pfn;
	}
	z[ZONE_NORMAL] = end_pfn - start_pfn;

	/* Remove lower zones from higher ones. */
	w = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (z[i])
			z[i] -= w;
		w += z[i];
	}

	/* Compute holes */
	w = start_pfn;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long s = w;
		w += z[i];
		h[i] = e820_hole_size(s, w);
	}

	/* Add the space needed for mem_map to the holes too. */
	for (i = 0; i < MAX_NR_ZONES; i++)
		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

	/* The 16MB DMA zone has the kernel and other misc mappings.
	   Account for them too. */
	if (h[ZONE_DMA]) {
		h[ZONE_DMA] += dma_reserve;
		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
			printk(KERN_WARNING
				"Kernel too large and filling up ZONE_DMA?\n");
			h[ZONE_DMA] = z[ZONE_DMA];
		}
	}
}

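/*
 * Worked example (illustrative numbers): a flat 1GB machine has
 * start_pfn = 0 and end_pfn = 262144; MAX_DMA_PFN is 4096 (16MB) and
 * MAX_DMA32_PFN is 1048576 (4GB). The cumulative sizes come out as
 * z[ZONE_DMA] = 4096, z[ZONE_DMA32] = 262144, z[ZONE_NORMAL] = 262144;
 * after the lower zones are subtracted this leaves 4096, 258048 and 0
 * pages respectively.
 */
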
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

	memory_present(0, 0, end_pfn);
	sparse_init();
	size_zones(zones, holes, 0, end_pfn);
	free_area_init_node(0, NODE_DATA(0), zones,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
	       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

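/*
 * Typical use (sketch with assumed variable names; the real caller is
 * the GART IOMMU setup, which unmaps the aperture so that speculative
 * CPU prefetches cannot create cache aliases with GART-remapped
 * memory):
 *
 *	clear_kernel_mapping((unsigned long)__va(iommu_bus_base),
 *			     iommu_size);
 *
 * Both the base and the size must be 2MB aligned, per the BUG_ONs.
 */
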
/*
 * Memory hotplug specific functions
 */
#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)

void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifndef CONFIG_MEMORY_HOTPLUG
/*
 * Memory Hotadd without sparsemem. The mem_maps have been allocated in
 * advance, just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif

/*
 * Memory is always added to the NORMAL zone. This means you will never
 * get additional DMA/DMA32 memory.
 */
int add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(0);
	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	init_memory_mapping(start, (start + size - 1));

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

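/*
 * Usage sketch (hypothetical driver code; res is an assumed struct
 * resource describing the new memory device, as the ACPI memory-hotplug
 * driver would have it):
 *
 *	u64 start = res->start;
 *	u64 size = res->end - res->start + 1;
 *	int err = add_memory(start, size);
 *
 * On success the new range sits in ZONE_NORMAL and is covered by the
 * direct mapping via init_memory_mapping() above.
 */
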
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#endif

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt,
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP bringup.
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
}

void free_initmem(void)
{
	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

extern char __start_rodata, __end_rodata;
void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)&__start_rodata;

	for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
	       (&__end_rodata - &__start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
		dma_reserve += len / PAGE_SIZE;
}

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

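/*
 * Sketch of a caller (illustrative; /dev/kmem style readers do this
 * before touching a kernel virtual address):
 *
 *	if (kern_addr_valid(addr))
 *		val = *(unsigned long *)addr;
 *
 * The 'above' check relies on x86-64 canonical addressing: all bits
 * above __VIRTUAL_MASK_SHIFT must be copies of the top bit, i.e. all
 * zeroes or all ones.
 */
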
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access to the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}