[PATCH] pgdat allocation for new node add (specify node id)
[linux-block.git] arch/x86_64/mm/init.c
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 * physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

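/*
 * Print a summary of memory use: free areas, swap, and counts of total,
 * reserved, swap-cached and shared pages across all online nodes.
 */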
void show_mem(void)
{
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
        pg_data_t *pgdat;
        struct page *page;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk(KERN_INFO "%lu pages of RAM\n", total);
        printk(KERN_INFO "%lu reserved pages\n", reserved);
        printk(KERN_INFO "%lu pages shared\n", shared);
        printk(KERN_INFO "%lu pages swap cached\n", cached);
}

int after_bootmem;

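/*
 * Allocate a zeroed page for kernel page tables: from the bootmem
 * allocator while we are still booting, from the page allocator once
 * after_bootmem is set.
 */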
static __init void *spp_getpage(void)
{
        void *ptr;
        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);
        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
                panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");

        Dprintk("spp_getpage %p\n", ptr);
        return ptr;
}

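/*
 * Install a single kernel mapping of 'phys' at 'vaddr', allocating any
 * missing intermediate page-table levels with spp_getpage().
 */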
static __init void set_pte_phys(unsigned long vaddr,
                                unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk("PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
        pmd_t *pmd;
        void *address;
        int allocated;
} temp_mappings[] __initdata = {
        { &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
        { &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
        {}
};

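/*
 * Hand out a zeroed page for the early direct-mapping page tables.
 * During early boot the page comes from the area reserved by
 * find_early_table_space() and is reached through one of the temporary
 * 40MB/42MB mappings above; *index records which mapping was used so it
 * can be released with unmap_low_page(). After boot (memory hotplug)
 * the page simply comes from the page allocator.
 */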
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
        struct temp_map *ti;
        int i;
        unsigned long pfn = table_end++, paddr;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC);
                *phys = __pa(adr);
                return adr;
        }

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");
        for (i = 0; temp_mappings[i].allocated; i++) {
                if (!temp_mappings[i].pmd)
                        panic("alloc_low_page: ran out of temp mappings");
        }
        ti = &temp_mappings[i];
        paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
        set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
        ti->allocated = 1;
        __flush_tlb();
        adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
        memset(adr, 0, PAGE_SIZE);
        *index = i;
        *phys = pfn * PAGE_SIZE;
        return adr;
}

static __meminit void unmap_low_page(int i)
{
        struct temp_map *ti;

        if (after_bootmem)
                return;

        ti = &temp_mappings[i];
        set_pmd(ti->pmd, __pmd(0));
        ti->allocated = 0;
}

/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
        unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

        /* actually usually some more */
        if (size >= LARGE_PAGE_SIZE) {
                printk("SMBIOS area too long %lu\n", size);
                return NULL;
        }
        set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
        map += LARGE_PAGE_SIZE;
        set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
        __flush_tlb();
        return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
        if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
                printk("early_iounmap: bad address %p\n", addr);
        set_pmd(temp_mappings[0].pmd, __pmd(0));
        set_pmd(temp_mappings[1].pmd, __pmd(0));
        __flush_tlb();
}

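/*
 * Fill one PMD page with 2MB kernel mappings covering [address, end).
 * At boot time any entries past 'end' are explicitly cleared.
 */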
static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
{
        int i;

        for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
                unsigned long entry;

                if (address >= end) {
                        if (!after_bootmem)
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        break;
                }
                entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
                entry &= __supported_pte_mask;
                set_pmd(pmd, __pmd(entry));
        }
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));

        if (pmd_none(*pmd)) {
                spin_lock(&init_mm.page_table_lock);
                phys_pmd_init(pmd, address, end);
                spin_unlock(&init_mm.page_table_lock);
                __flush_tlb_all();
        }
}

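/*
 * Populate a PUD page: for every 1GB slot that covers [address, end),
 * allocate a PMD page and fill it with phys_pmd_init(); slots with no
 * RAM in the e820 map are left unmapped at boot time.
 */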
static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
        long i = pud_index(address);

        pud = pud + i;

        if (after_bootmem && pud_val(*pud)) {
                phys_pmd_update(pud, address, end);
                return;
        }

        for (; i < PTRS_PER_PUD; pud++, i++) {
                int map;
                unsigned long paddr, pmd_phys;
                pmd_t *pmd;

                paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
                if (paddr >= end)
                        break;

                if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                pmd = alloc_low_page(&map, &pmd_phys);
                spin_lock(&init_mm.page_table_lock);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                phys_pmd_init(pmd, paddr, end);
                spin_unlock(&init_mm.page_table_lock);
                unmap_low_page(map);
        }
        __flush_tlb();
}

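/*
 * Estimate how much memory the kernel direct-mapping page tables will
 * need for physical memory up to 'end' and reserve a contiguous e820
 * area for them, starting the search at 0x8000.
 */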
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

        /* RED-PEN putting page tables only on node 0 could
           cause a hotspot and fill up ZONE_DMA. The page tables
           need roughly 0.5KB per GB. */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;

        early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}

/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next;

        Dprintk("init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         * Later we should allocate these tables in the local node of the
         * memory mapped. Unfortunately this is done currently before the
         * nodes are discovered.
         */
        if (!after_bootmem)
                find_early_table_space(end);

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                int map;
                unsigned long pud_phys;
                pgd_t *pgd = pgd_offset_k(start);
                pud_t *pud;

                if (after_bootmem)
                        pud = pud_offset(pgd, start & PGDIR_MASK);
                else
                        pud = alloc_low_page(&map, &pud_phys);

                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                if (!after_bootmem)
                        set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(map);
        }

        if (!after_bootmem)
                asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
        __flush_tlb_all();
}

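/*
 * Drop the low identity mapping left over from early boot: the boot CPU
 * clears the first kernel PGD entry, secondary CPUs simply switch their
 * cr3 to init_level4_pgt and flush the TLB.
 */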
void __cpuinit zap_low_mappings(int cpu)
{
        if (cpu == 0) {
                pgd_t *pgd = pgd_offset_k(0UL);
                pgd_clear(pgd);
        } else {
                /*
                 * For APs, zap the low identity mappings by changing the cr3
                 * to init_level4_pgt and doing a local flush tlb all.
                 */
                asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
        }
        __flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
           unsigned long start_pfn, unsigned long end_pfn)
{
        int i;
        unsigned long w;

        for (i = 0; i < MAX_NR_ZONES; i++)
                z[i] = 0;

        if (start_pfn < MAX_DMA_PFN)
                z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
        if (start_pfn < MAX_DMA32_PFN) {
                unsigned long dma32_pfn = MAX_DMA32_PFN;
                if (dma32_pfn > end_pfn)
                        dma32_pfn = end_pfn;
                z[ZONE_DMA32] = dma32_pfn - start_pfn;
        }
        z[ZONE_NORMAL] = end_pfn - start_pfn;

        /* Remove lower zones from higher ones. */
        w = 0;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                if (z[i])
                        z[i] -= w;
                w += z[i];
        }

        /* Compute holes */
        w = start_pfn;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                unsigned long s = w;
                w += z[i];
                h[i] = e820_hole_size(s, w);
        }

        /* Add the space needed for mem_map to the holes too. */
        for (i = 0; i < MAX_NR_ZONES; i++)
                h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

        /* The 16MB DMA zone has the kernel and other misc mappings.
           Account them too */
        if (h[ZONE_DMA]) {
                h[ZONE_DMA] += dma_reserve;
                if (h[ZONE_DMA] >= z[ZONE_DMA]) {
                        printk(KERN_WARNING
                                "Kernel too large and filling up ZONE_DMA?\n");
                        h[ZONE_DMA] = z[ZONE_DMA];
                }
        }
}

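/*
 * In the non-NUMA case there is a single node covering all of memory;
 * compute its zone sizes and set up its mem_map here.
 */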
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
        unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

        memory_present(0, 0, end_pfn);
        sparse_init();
        size_zones(zones, holes, 0, end_pfn);
        free_area_init_node(0, NODE_DATA(0), zones,
                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
        unsigned long end = address + size;

        BUG_ON(address & ~LARGE_PAGE_MASK);
        BUG_ON(size & ~LARGE_PAGE_MASK);

        for (; address < end; address += LARGE_PAGE_SIZE) {
                pgd_t *pgd = pgd_offset_k(address);
                pud_t *pud;
                pmd_t *pmd;
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, address);
                if (pud_none(*pud))
                        continue;
                pmd = pmd_offset(pud, address);
                if (!pmd || pmd_none(*pmd))
                        continue;
                if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
                        /* Could handle this, but it should not happen currently. */
                        printk(KERN_ERR
                               "clear_kernel_mapping: mapping has been split. will leak memory\n");
                        pmd_ERROR(*pmd);
                }
                set_pmd(pmd, __pmd(0));
        }
        __flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * XXX: memory_add_physaddr_to_nid() is used to find the node id for a
 * physical address when memory is added via the sysfs probe interface.
 * When ACPI notifies a hot-add event the node id can be found by searching
 * the DSDT, but the probe interface carries no node id, so return node 0
 * for now.
 */
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return 0;
}
#endif

/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        ret = __add_pages(zone, start_pfn, nr_pages);
        if (ret)
                goto error;

        init_memory_mapping(start, (start + size -1));

        return ret;
error:
        printk("%s: Problem encountered in __add_pages!\n", __func__);
        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#else /* CONFIG_MEMORY_HOTPLUG */
/*
 * Memory hot-add without sparsemem. The mem_maps have been allocated in
 * advance, just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
        int err = -EIO;
        unsigned long pfn;
        unsigned long total = 0, mem = 0;
        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                if (pfn_valid(pfn)) {
                        online_page(pfn_to_page(pfn));
                        err = 0;
                        mem++;
                }
                total++;
        }
        if (!err) {
                z->spanned_pages += total;
                z->present_pages += mem;
                z->zone_pgdat->node_spanned_pages += total;
                z->zone_pgdat->node_present_pages += mem;
        }
        return err;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
                         kcore_vsyscall;

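/*
 * Late boot memory setup: release bootmem to the page allocator, compute
 * the memory statistics printed below and register the /proc/kcore areas.
 */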
void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

        pci_iommu_alloc();

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        num_physpages = end_pfn;
        high_memory = (void *) __va(end_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

        after_bootmem = 1;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START);

        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                end_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);

#ifdef CONFIG_SMP
        /*
         * Sync boot_level4_pgt mappings with the init_level4_pgt
         * except for the low identity mappings which are already zapped
         * in init_level4_pgt. This sync-up is essential for AP's bringup.
         */
        memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

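/*
 * Release a range of init memory back to the page allocator, poisoning
 * it with 0xcc first so stale references are caught quickly.
 */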
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr;

        if (begin >= end)
                return;

        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
}

void free_initmem(void)
{
        memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

extern char __start_rodata, __end_rodata;
void mark_rodata_ro(void)
{
        unsigned long addr = (unsigned long)&__start_rodata;

        for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
                change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

        printk("Write protecting the kernel read-only data: %luk\n",
                        (&__end_rodata - &__start_rodata) >> 10);

        /*
         * change_page_attr_addr() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        int nid = phys_to_nid(phys);
        reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
        reserve_bootmem(phys, len);
#endif
        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
                dma_reserve += len / PAGE_SIZE;
}

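/*
 * Walk the kernel page tables to check whether a kernel virtual address
 * is backed by a valid mapping; 2MB large-page mappings are handled at
 * the pmd level.
 */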
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;
        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;
        return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
        { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
          proc_dointvec },
        { 0, }
};

static ctl_table debug_root_table2[] = {
        { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
          .child = debug_table2 },
        { 0 },
};

static __init int x8664_sysctl_init(void)
{
        register_sysctl_table(debug_root_table2, 1);
        return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access to the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
        .vm_start = VSYSCALL_START,
        .vm_end = VSYSCALL_END,
        .vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);
        if (!vma)
                return 0;
        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}