linux-2.6-block.git: arch/x86_64/mm/init.c

/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%ld pages of RAM\n", total);
	printk(KERN_INFO "%ld reserved pages\n", reserved);
	printk(KERN_INFO "%ld pages shared\n", shared);
	printk(KERN_INFO "%ld pages swap cached\n", cached);
}

int after_bootmem;

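/*
 * Allocate a page for a kernel page table: from the bootmem allocator
 * before the buddy allocator is up, and with GFP_ATOMIC afterwards
 * (after_bootmem). Panics if no properly aligned page is available.
 */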
static __init void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

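/*
 * Install a single kernel mapping of vaddr -> phys with the given
 * protection, allocating any missing intermediate page-table levels
 * via spp_getpage() along the way.
 */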
static __init void set_pte_phys(unsigned long vaddr,
				unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void *address;
	int allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

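/*
 * Grab a zeroed page for an early page table. Before bootmem is up this
 * takes the next free pfn at table_end and maps it through one of the
 * temp_mappings slots above; after bootmem it just uses the page
 * allocator. *index identifies the temp slot for unmap_low_page().
 */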
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	memset(adr, 0, PAGE_SIZE);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(int i)
{
	struct temp_map *ti;

	if (after_bootmem)
		return;

	ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

	/* the two 2MB mappings below usually give a bit more than this */
	if (size >= LARGE_PAGE_SIZE) {
		return NULL;
	}
	set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	map += LARGE_PAGE_SIZE;
	set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	__flush_tlb();
	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
		printk("early_iounmap: bad address %p\n", addr);
	set_pmd(temp_mappings[0].pmd, __pmd(0));
	set_pmd(temp_mappings[1].pmd, __pmd(0));
	__flush_tlb();
}

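/*
 * Fill one page of PMD entries with 2MB (PSE) mappings, from 'address'
 * up to 'end'. Entries that are already present are left alone; past
 * 'end' the remaining entries are cleared (boot-time only).
 */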
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long entry;
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}

		if (pmd_val(*pmd))
			continue;

		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	spin_lock(&init_mm.page_table_lock);
	phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
}

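/*
 * Populate one page of PUD entries. Each slot gets a freshly allocated
 * PMD page filled by phys_pmd_init(); ranges that the e820 map reports
 * as having no memory are cleared instead, and already-present entries
 * are only updated.
 */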
static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		int map;
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			phys_pmd_update(pud, addr, end);
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(map);
	}
	__flush_tlb();
}

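/*
 * Estimate how much room the direct-mapping page tables will need and
 * reserve a free e820 area (above 0x8000) to hold them; the result is
 * recorded in table_start/table_end for alloc_low_page().
 */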
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}

/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped. Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&map, &pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}

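/*
 * Drop the boot-time identity mapping of low memory: the boot CPU
 * clears the first PGD slot, secondary CPUs simply reload cr3 with
 * init_level4_pgt, which never had the low mappings.
 */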
void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For AP's, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing local flush tlb all
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	memory_present(0, 0, end_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
			       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, (start + size - 1));

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	return ret;
error:
	printk(KERN_ERR "%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#ifndef CONFIG_ACPI_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
/*
 * Memory hotadd without sparsemem. The mem_maps have been allocated in
 * advance, so just online the pages here.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages -
				absent_pages_in_range(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP's bringup
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

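/*
 * Return a range of init pages to the page allocator, poisoning them
 * first so that stale uses of freed init memory are easier to catch.
 */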
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
		       POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
}

void free_initmem(void)
{
	memset(__initdata_begin, POISON_FREE_INITDATA,
	       __initdata_end - __initdata_begin);
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)__start_rodata;

	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
	       (__end_rodata - __start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;
	if (pfn >= end_pfn) {
		/* This can happen with kdump kernels when accessing firmware
		   tables. */
		if (pfn < end_pfn_map)
			return;
		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
				phys, len);
		return;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}
}

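/*
 * Walk the kernel page tables for 'addr' and report whether it is
 * backed by a valid pfn. Handles 2MB (pmd_large) mappings as well.
 */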
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}