/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

#include "mm_internal.h"
static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
                           unsigned long addr, unsigned long end)
{
        addr &= PMD_MASK;
        for (; addr < end; addr += PMD_SIZE) {
                pmd_t *pmd = pmd_page + pmd_index(addr);

                if (!pmd_present(*pmd))
                        set_pmd(pmd, __pmd(addr | pmd_flag));
        }
}
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
                          unsigned long addr, unsigned long end)
{
        unsigned long next;

        for (; addr < end; addr = next) {
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;

                next = (addr & PUD_MASK) + PUD_SIZE;
                if (next > end)
                        next = end;

                if (pud_present(*pud)) {
                        pmd = pmd_offset(pud, 0);
                        ident_pmd_init(info->pmd_flag, pmd, addr, next);
                        continue;
                }
                pmd = (pmd_t *)info->alloc_pgt_page(info->context);
                if (!pmd)
                        return -ENOMEM;
                ident_pmd_init(info->pmd_flag, pmd, addr, next);
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }

        return 0;
}
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
                              unsigned long addr, unsigned long end)
{
        unsigned long next;
        int result;
        int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;

        for (; addr < end; addr = next) {
                pgd_t *pgd = pgd_page + pgd_index(addr) + off;
                pud_t *pud;

                next = (addr & PGDIR_MASK) + PGDIR_SIZE;
                if (next > end)
                        next = end;

                if (pgd_present(*pgd)) {
                        pud = pud_offset(pgd, 0);
                        result = ident_pud_init(info, pud, addr, next);
                        if (result)
                                return result;
                        continue;
                }

                pud = (pud_t *)info->alloc_pgt_page(info->context);
                if (!pud)
                        return -ENOMEM;
                result = ident_pud_init(info, pud, addr, next);
                if (result)
                        return result;
                set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
        }

        return 0;
}
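
/*
 * Illustrative sketch of a caller (names here are for illustration only):
 * users of this identity-mapping helper, such as the kexec path, supply a
 * page-table allocator through struct x86_mapping_info and call the
 * function once per memory region:
 *
 *	struct x86_mapping_info info = {
 *		.alloc_pgt_page	= alloc_pgt_page,	// caller's allocator
 *		.context	= &pages,		// allocator state
 *		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
 *	};
 *	result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
 */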
static int __init parse_direct_gbpages_off(char *arg)
{
        direct_gbpages = 0;
        return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
        direct_gbpages = 1;
        return 0;
}
early_param("gbpages", parse_direct_gbpages_on);
/*
 * NOTE: pagetable_init() allocates all the fixmap page tables contiguously
 * in physical memory, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;
/*
 * noexec32=on|off
 * Control non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
        if (!strcmp(str, "on"))
                force_personality32 &= ~READ_IMPLIES_EXEC;
        else if (!strcmp(str, "off"))
                force_personality32 |= READ_IMPLIES_EXEC;
        return 1;
}
__setup("noexec32=", nonx32_setup);
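
/*
 * Example: booting with "noexec32=off" on the kernel command line makes
 * PROT_READ imply PROT_EXEC for 32-bit processes again, for legacy
 * binaries that expect readable mappings to be executable.
 */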
/*
 * When memory is added or removed, make sure all the process mm's have
 * suitable PGD entries in the local PGD-level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end, int removed)
{
        unsigned long address;

        for (address = start; address <= end; address += PGDIR_SIZE) {
                const pgd_t *pgd_ref = pgd_offset_k(address);
                struct page *page;

                /*
                 * When this function is called after memory hot remove,
                 * pgd_none() returns true. In this case (removed == 1), we
                 * must clear the PGD entries in the local PGD-level page.
                 */
                if (pgd_none(*pgd_ref) && !removed)
                        continue;

                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        spinlock_t *pgt_lock;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        /* the pgt_lock is only used by Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
                        spin_lock(pgt_lock);

                        if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
                                BUG_ON(pgd_page_vaddr(*pgd)
                                       != pgd_page_vaddr(*pgd_ref));

                        if (removed) {
                                if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
                                        pgd_clear(pgd);
                        } else {
                                if (pgd_none(*pgd))
                                        set_pgd(pgd, *pgd_ref);
                        }

                        spin_unlock(pgt_lock);
                }
                spin_unlock(&pgd_lock);
        }
}
/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). Doing so is safe ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                        after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}
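
/*
 * The fill_{pud,pmd,pte}() helpers below each descend one level of the
 * kernel page tables for a virtual address, allocating the next-level
 * table with spp_getpage() when the entry is empty, and return a pointer
 * to the entry for vaddr at that level.
 */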
static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
        if (pgd_none(*pgd)) {
                pud_t *pud = (pud_t *)spp_getpage();
                pgd_populate(&init_mm, pgd, pud);
                if (pud != pud_offset(pgd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
                               pud, pud_offset(pgd, 0));
        }
        return pud_offset(pgd, vaddr);
}
static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
        if (pud_none(*pud)) {
                pmd_t *pmd = (pmd_t *) spp_getpage();
                pud_populate(&init_mm, pud, pmd);
                if (pmd != pmd_offset(pud, 0))
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                               pmd, pmd_offset(pud, 0));
        }
        return pmd_offset(pud, vaddr);
}
static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) spp_getpage();
                pmd_populate_kernel(&init_mm, pmd, pte);
                if (pte != pte_offset_kernel(pmd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
        }
        return pte_offset_kernel(pmd, vaddr);
}
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pud = pud_page + pud_index(vaddr);
        pmd = fill_pmd(pud, vaddr);
        pte = fill_pte(pmd, vaddr);

        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
        pgd_t *pgd;
        pud_t *pud_page;

        pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                        "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud_page = (pud_t*)pgd_page_vaddr(*pgd);
        set_pte_vaddr_pud(pud_page, vaddr, pteval);
}
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        pgd_t *pgd;
        pud_t *pud;

        pgd = pgd_offset_k(vaddr);
        pud = fill_pud(pgd, vaddr);
        return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return fill_pte(pmd, vaddr);
}
/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
                                        pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
        for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
                pgd = pgd_offset_k((unsigned long)__va(phys));
                if (pgd_none(*pgd)) {
                        pud = (pud_t *) spp_getpage();
                        set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pud = pud_offset(pgd, (unsigned long)__va(phys));
                if (pud_none(*pud)) {
                        pmd = (pmd_t *) spp_getpage();
                        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pmd = pmd_offset(pud, phys);
                BUG_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
        }
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}
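
/*
 * Note: these wrappers create 2MB mappings for platform-specific ranges;
 * the _uc variant is meant for uncacheable (MMIO-like) ranges, as used
 * for instance by the SGI UV platform setup code.
 */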
/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end - _text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile-time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end. _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
        unsigned long vaddr = __START_KERNEL_map;
        unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
        unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;

        /*
         * Native path, max_pfn_mapped is not set yet.
         * Xen has a valid max_pfn_mapped set in
         *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
         */
        if (max_pfn_mapped)
                vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

        for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > end)
                        set_pmd(pmd, __pmd(0));
        }
}
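
/*
 * The phys_{pte,pmd,pud}_init() helpers below each populate one level of
 * the direct mapping for a physical range. Each returns the last physical
 * address it mapped, so the caller can track how far the direct mapping
 * extends, and each updates the per-level page counters behind the
 * DirectMap4k/2M/1G statistics.
 */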
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
              pgprot_t prot)
{
        unsigned long pages = 0, next;
        unsigned long last_map_addr = end;
        int i;

        pte_t *pte = pte_page + pte_index(addr);

        for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
                next = (addr & PAGE_MASK) + PAGE_SIZE;
                if (addr >= end) {
                        if (!after_bootmem &&
                            !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
                            !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
                                set_pte(pte, __pte(0));
                        continue;
                }

                /*
                 * We will re-use the existing mapping.
                 * Xen for example has some special requirements, like mapping
                 * pagetable pages as RO. So assume whoever pre-set up these
                 * mappings knew what they were doing.
                 */
                if (pte_val(*pte)) {
                        if (!after_bootmem)
                                pages++;
                        continue;
                }

                if (0)
                        printk("   pte=%p addr=%lx pte=%016lx\n",
                               pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
                pages++;
                set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
                last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
        }

        update_page_count(PG_LEVEL_4K, pages);

        return last_map_addr;
}
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
              unsigned long page_size_mask, pgprot_t prot)
{
        unsigned long pages = 0, next;
        unsigned long last_map_addr = end;

        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address = next) {
                pmd_t *pmd = pmd_page + pmd_index(address);
                pte_t *pte;
                pgprot_t new_prot = prot;

                next = (address & PMD_MASK) + PMD_SIZE;
                if (address >= end) {
                        if (!after_bootmem &&
                            !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
                            !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
                                set_pmd(pmd, __pmd(0));
                        continue;
                }

                if (pmd_val(*pmd)) {
                        if (!pmd_large(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                pte = (pte_t *)pmd_page_vaddr(*pmd);
                                last_map_addr = phys_pte_init(pte, address,
                                                              end, prot);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
                        /*
                         * If we are OK with a PG_LEVEL_2M mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the large page mapping but
                         * use the same existing protection bits, except for
                         * the large-page bit, so that we don't violate Intel's
                         * TLB Application note (317080), which says that,
                         * while changing the page sizes, new and old
                         * translations should not differ with respect to
                         * page frame and attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_2M)) {
                                if (!after_bootmem)
                                        pages++;
                                last_map_addr = next;
                                continue;
                        }
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                }

                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pmd,
                                pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
                                        __pgprot(pgprot_val(prot) | _PAGE_PSE)));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = next;
                        continue;
                }

                pte = alloc_low_page();
                last_map_addr = phys_pte_init(pte, address, end, new_prot);

                spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel(&init_mm, pmd, pte);
                spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
        return last_map_addr;
}
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
              unsigned long page_size_mask)
{
        unsigned long pages = 0, next;
        unsigned long last_map_addr = end;
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = next) {
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;
                pgprot_t prot = PAGE_KERNEL;

                next = (addr & PUD_MASK) + PUD_SIZE;
                if (addr >= end) {
                        if (!after_bootmem &&
                            !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
                            !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
                                set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        if (!pud_large(*pud)) {
                                pmd = pmd_offset(pud, 0);
                                last_map_addr = phys_pmd_init(pmd, addr, end,
                                                              page_size_mask, prot);
                                __flush_tlb_all();
                                continue;
                        }
                        /*
                         * If we are OK with a PG_LEVEL_1G mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the gbpage mapping but use
                         * the same existing protection bits, except for the
                         * large-page bit, so that we don't violate Intel's
                         * TLB Application note (317080), which says that,
                         * while changing the page sizes, new and old
                         * translations should not differ with respect to
                         * page frame and attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_1G)) {
                                if (!after_bootmem)
                                        pages++;
                                last_map_addr = next;
                                continue;
                        }
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                }

                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pud,
                                pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
                                        PAGE_KERNEL_LARGE));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = next;
                        continue;
                }

                pmd = alloc_low_page();
                last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
                                              prot);

                spin_lock(&init_mm.page_table_lock);
                pud_populate(&init_mm, pud, pmd);
                spin_unlock(&init_mm.page_table_lock);
        }
        __flush_tlb_all();

        update_page_count(PG_LEVEL_1G, pages);

        return last_map_addr;
}
unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
{
        bool pgd_changed = false;
        unsigned long next, last_map_addr = end;
        unsigned long addr;

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);
        addr = start;

        for (; start < end; start = next) {
                pgd_t *pgd = pgd_offset_k(start);
                pud_t *pud;

                next = (start & PGDIR_MASK) + PGDIR_SIZE;

                if (pgd_val(*pgd)) {
                        pud = (pud_t *)pgd_page_vaddr(*pgd);
                        last_map_addr = phys_pud_init(pud, __pa(start),
                                                      __pa(end), page_size_mask);
                        continue;
                }

                pud = alloc_low_page();
                last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
                                              page_size_mask);

                spin_lock(&init_mm.page_table_lock);
                pgd_populate(&init_mm, pgd, pud);
                spin_unlock(&init_mm.page_table_lock);
                pgd_changed = true;
        }

        if (pgd_changed)
                sync_global_pgds(addr, end - 1, 0);

        __flush_tlb_all();

        return last_map_addr;
}
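
/*
 * Illustrative call (sketch; the values are made up): init_memory_mapping()
 * drives the function above for each physical range, with page_size_mask
 * selecting which large page sizes may be used, e.g.:
 *
 *	last_mapped = kernel_physical_mapping_init(0, 64UL << 30,
 *						   1 << PG_LEVEL_2M);
 */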
void __init initmem_init(void)
{
        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}
void __init paging_init(void)
{
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

        /*
         * Clear the default setting with node 0.
         * Note: don't use nodes_clear here; that really clears when NUMA
         * support is not compiled in, and later node_set_state will not
         * set it back.
         */
        node_clear_state(0, N_MEMORY);
        if (N_MEMORY != N_NORMAL_MEMORY)
                node_clear_state(0, N_NORMAL_MEMORY);

        zone_sizes_init();
}
/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory
 * need updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
        unsigned long end_pfn = PFN_UP(start + size);

        if (end_pfn > max_pfn) {
                max_pfn = end_pfn;
                max_low_pfn = end_pfn;
                high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
        }
}

/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones +
                zone_for_memory(nid, start, size, ZONE_NORMAL);
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        init_memory_mapping(start, start + size);

        ret = __add_pages(nid, zone, start_pfn, nr_pages);
        WARN_ON_ONCE(ret);

        /* update max_pfn, max_low_pfn and high_memory */
        update_end_of_memory_vars(start, size);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);
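
/*
 * Note: arch_add_memory() is reached from the generic add_memory() path
 * (e.g. ACPI memory-device hotplug). It extends the direct mapping first
 * and only then hands the new pfn range to the core mm via __add_pages().
 */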
#define PAGE_INUSE 0xFD

static void __meminit free_pagetable(struct page *page, int order)
{
        unsigned long magic;
        unsigned int nr_pages = 1 << order;

        /* bootmem page has reserved flag */
        if (PageReserved(page)) {
                __ClearPageReserved(page);

                magic = (unsigned long)page->lru.next;
                if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
                        while (nr_pages--)
                                put_page_bootmem(page++);
                } else
                        while (nr_pages--)
                                free_reserved_page(page++);
        } else
                free_pages((unsigned long)page_address(page), order);
}
static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
        pte_t *pte;
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte = pte_start + i;
                if (pte_val(*pte))
                        return;
        }

        /* free a pte table */
        free_pagetable(pmd_page(*pmd), 0);
        spin_lock(&init_mm.page_table_lock);
        pmd_clear(pmd);
        spin_unlock(&init_mm.page_table_lock);
}
static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
        pmd_t *pmd;
        int i;

        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd = pmd_start + i;
                if (pmd_val(*pmd))
                        return;
        }

        /* free a pmd table */
        free_pagetable(pud_page(*pud), 0);
        spin_lock(&init_mm.page_table_lock);
        pud_clear(pud);
        spin_unlock(&init_mm.page_table_lock);
}
/* Return true if pgd is changed, otherwise return false. */
static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd)
{
        pud_t *pud;
        int i;

        for (i = 0; i < PTRS_PER_PUD; i++) {
                pud = pud_start + i;
                if (pud_val(*pud))
                        return false;
        }

        /* free a pud table */
        free_pagetable(pgd_page(*pgd), 0);
        spin_lock(&init_mm.page_table_lock);
        pgd_clear(pgd);
        spin_unlock(&init_mm.page_table_lock);

        return true;
}
static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
                 bool direct)
{
        unsigned long next, pages = 0;
        pte_t *pte;
        void *page_addr;
        phys_addr_t phys_addr;

        pte = pte_start + pte_index(addr);
        for (; addr < end; addr = next, pte++) {
                next = (addr + PAGE_SIZE) & PAGE_MASK;
                if (next > end)
                        next = end;

                if (!pte_present(*pte))
                        continue;

                /*
                 * We mapped [0,1G) memory as identity mapping when
                 * initializing, in arch/x86/kernel/head_64.S. These
                 * pagetables cannot be removed.
                 */
                phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
                if (phys_addr < (phys_addr_t)0x40000000)
                        return;

                if (IS_ALIGNED(addr, PAGE_SIZE) &&
                    IS_ALIGNED(next, PAGE_SIZE)) {
                        /*
                         * Do not free direct mapping pages since they were
                         * freed when offlining, or simply not in use.
                         */
                        if (!direct)
                                free_pagetable(pte_page(*pte), 0);

                        spin_lock(&init_mm.page_table_lock);
                        pte_clear(&init_mm, addr, pte);
                        spin_unlock(&init_mm.page_table_lock);

                        /* For non-direct mapping, pages means nothing. */
                        pages++;
                } else {
                        /*
                         * If we are here, we are freeing vmemmap pages since
                         * direct mapped memory ranges to be freed are aligned.
                         *
                         * If we are not removing the whole page, it means
                         * other page structs in this page are being used and
                         * we cannot remove them. So fill the unused page
                         * structs with 0xFD, and remove the page when it is
                         * wholly filled with 0xFD.
                         */
                        memset((void *)addr, PAGE_INUSE, next - addr);

                        page_addr = page_address(pte_page(*pte));
                        if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
                                free_pagetable(pte_page(*pte), 0);

                                spin_lock(&init_mm.page_table_lock);
                                pte_clear(&init_mm, addr, pte);
                                spin_unlock(&init_mm.page_table_lock);
                        }
                }
        }

        /* Call free_pte_table() in remove_pmd_table(). */
        flush_tlb_all();
        if (direct)
                update_page_count(PG_LEVEL_4K, -pages);
}
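
/*
 * Worked example of the PAGE_INUSE protocol above: when two sections share
 * one vmemmap page and only the first half is removed, that half is filled
 * with 0xFD and the page stays mapped. Once the second half is removed too,
 * memchr_inv() sees the whole page poisoned, and only then is the page
 * freed and the pte cleared.
 */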
static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                 bool direct)
{
        unsigned long next, pages = 0;
        pte_t *pte_base;
        pmd_t *pmd;
        void *page_addr;

        pmd = pmd_start + pmd_index(addr);
        for (; addr < end; addr = next, pmd++) {
                next = pmd_addr_end(addr, end);

                if (!pmd_present(*pmd))
                        continue;

                if (pmd_large(*pmd)) {
                        if (IS_ALIGNED(addr, PMD_SIZE) &&
                            IS_ALIGNED(next, PMD_SIZE)) {
                                if (!direct)
                                        free_pagetable(pmd_page(*pmd),
                                                       get_order(PMD_SIZE));

                                spin_lock(&init_mm.page_table_lock);
                                pmd_clear(pmd);
                                spin_unlock(&init_mm.page_table_lock);
                                pages++;
                        } else {
                                /* If here, we are freeing vmemmap pages. */
                                memset((void *)addr, PAGE_INUSE, next - addr);

                                page_addr = page_address(pmd_page(*pmd));
                                if (!memchr_inv(page_addr, PAGE_INUSE,
                                                PMD_SIZE)) {
                                        free_pagetable(pmd_page(*pmd),
                                                       get_order(PMD_SIZE));

                                        spin_lock(&init_mm.page_table_lock);
                                        pmd_clear(pmd);
                                        spin_unlock(&init_mm.page_table_lock);
                                }
                        }

                        continue;
                }

                pte_base = (pte_t *)pmd_page_vaddr(*pmd);
                remove_pte_table(pte_base, addr, next, direct);
                free_pte_table(pte_base, pmd);
        }

        /* Call free_pmd_table() in remove_pud_table(). */
        if (direct)
                update_page_count(PG_LEVEL_2M, -pages);
}
static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                 bool direct)
{
        unsigned long next, pages = 0;
        pmd_t *pmd_base;
        pud_t *pud;
        void *page_addr;

        pud = pud_start + pud_index(addr);
        for (; addr < end; addr = next, pud++) {
                next = pud_addr_end(addr, end);

                if (!pud_present(*pud))
                        continue;

                if (pud_large(*pud)) {
                        if (IS_ALIGNED(addr, PUD_SIZE) &&
                            IS_ALIGNED(next, PUD_SIZE)) {
                                if (!direct)
                                        free_pagetable(pud_page(*pud),
                                                       get_order(PUD_SIZE));

                                spin_lock(&init_mm.page_table_lock);
                                pud_clear(pud);
                                spin_unlock(&init_mm.page_table_lock);
                                pages++;
                        } else {
                                /* If here, we are freeing vmemmap pages. */
                                memset((void *)addr, PAGE_INUSE, next - addr);

                                page_addr = page_address(pud_page(*pud));
                                if (!memchr_inv(page_addr, PAGE_INUSE,
                                                PUD_SIZE)) {
                                        free_pagetable(pud_page(*pud),
                                                       get_order(PUD_SIZE));

                                        spin_lock(&init_mm.page_table_lock);
                                        pud_clear(pud);
                                        spin_unlock(&init_mm.page_table_lock);
                                }
                        }

                        continue;
                }

                pmd_base = (pmd_t *)pud_page_vaddr(*pud);
                remove_pmd_table(pmd_base, addr, next, direct);
                free_pmd_table(pmd_base, pud);
        }

        if (direct)
                update_page_count(PG_LEVEL_1G, -pages);
}
/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
        unsigned long next;
        unsigned long addr;
        pgd_t *pgd;
        pud_t *pud;
        bool pgd_changed = false;

        for (addr = start; addr < end; addr = next) {
                next = pgd_addr_end(addr, end);

                pgd = pgd_offset_k(addr);
                if (!pgd_present(*pgd))
                        continue;

                pud = (pud_t *)pgd_page_vaddr(*pgd);
                remove_pud_table(pud, addr, next, direct);
                if (free_pud_table(pud, pgd))
                        pgd_changed = true;
        }

        if (pgd_changed)
                sync_global_pgds(start, end - 1, 1);

        flush_tlb_all();
}
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
        remove_pagetable(start, end, false);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        remove_pagetable(start, end, true);
}

int __ref arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;
        int ret;

        zone = page_zone(pfn_to_page(start_pfn));
        kernel_physical_mapping_remove(start, start + size);
        ret = __remove_pages(zone, start_pfn, nr_pages);
        WARN_ON_ONCE(ret);

        return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
        int i;

        for_each_online_node(i)
                register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
        pci_iommu_alloc();

        /* clear_bss() already cleared the empty_zero_page */

        register_page_bootmem_info();

        /* this will put all memory onto the freelists */
        free_all_bootmem();
        after_bootmem = 1;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
                   PAGE_SIZE, KCORE_OTHER);

        mem_init_print_info(NULL);
}
#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = PFN_ALIGN(__stop___ex_table);

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, end);

        /*
         * Make the kernel identity mapping for text RW. The kernel text
         * mapping will always be RO. Refer to the comment in
         * static_protections() in pageattr.c
         */
        set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = PFN_ALIGN(__stop___ex_table);

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, end);

        /*
         * Set the kernel identity mapping for text RO.
         */
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}
void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long rodata_start = PFN_ALIGN(__start_rodata);
        unsigned long end = (unsigned long) &__end_rodata_hpage_align;
        unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
        unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
        unsigned long all_end = PFN_ALIGN(&_end);

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        kernel_set_to_readonly = 1;

        /*
         * The rodata/data/bss/brk section (but not the kernel text!)
         * should also be not-executable.
         */
        set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);

        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end - start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);
#endif

        free_init_pages("unused kernel",
                        (unsigned long) __va(__pa_symbol(text_end)),
                        (unsigned long) __va(__pa_symbol(rodata_start)));
        free_init_pages("unused kernel",
                        (unsigned long) __va(__pa_symbol(rodata_end)),
                        (unsigned long) __va(__pa_symbol(_sdata)));
}

#endif /* CONFIG_DEBUG_RODATA */
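
/*
 * kern_addr_valid() below reports whether a kernel virtual address is
 * backed by a present mapping at some level; it is used, for example, by
 * /proc/kcore to avoid faulting on holes while reading kernel memory.
 */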
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_large(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}
/*
 * A pseudo VMA to allow ptrace access to the vsyscall page. This only
 * covers the 64-bit vsyscall page now. 32-bit has a real VMA now and does
 * not need special handling anymore:
 */
static const char *gate_vma_name(struct vm_area_struct *vma)
{
        return "[vsyscall]";
}

static struct vm_operations_struct gate_vma_ops = {
        .name = gate_vma_name,
};

static struct vm_area_struct gate_vma = {
        .vm_start	= VSYSCALL_ADDR,
        .vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
        .vm_page_prot	= PAGE_READONLY_EXEC,
        .vm_flags	= VM_READ | VM_EXEC,
        .vm_ops		= &gate_vma_ops,
};

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_IA32_EMULATION
        if (!mm || mm->context.ia32_compat)
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(mm);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable mm, typically from interrupt
 * context. It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
        return (addr & PAGE_MASK) == VSYSCALL_ADDR;
}
static unsigned long probe_memory_block_size(void)
{
        /* start from 2g */
        unsigned long bz = 1UL<<31;

#ifdef CONFIG_X86_UV
        if (is_uv_system()) {
                printk(KERN_INFO "UV: memory block size 2GB\n");
                return 2UL * 1024 * 1024 * 1024;
        }
#endif

        /* less than 64g installed */
        if ((max_pfn << PAGE_SHIFT) < (16UL << 32))
                return MIN_MEMORY_BLOCK_SIZE;

        /* get the tail size */
        while (bz > MIN_MEMORY_BLOCK_SIZE) {
                if (!((max_pfn << PAGE_SHIFT) & (bz - 1)))
                        break;
                bz >>= 1;
        }

        printk(KERN_DEBUG "memory block size : %ldMB\n", bz >> 20);

        return bz;
}
static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
        if (!memory_block_size_probed)
                memory_block_size_probed = probe_memory_block_size();

        return memory_block_size_probed;
}
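
/*
 * Worked example for probe_memory_block_size(): with 65GiB installed, the
 * end of memory (65 << 30) is not 2GiB-aligned, so bz halves to 1GiB,
 * which divides it evenly - the block size becomes 1024MB. With exactly
 * 64GiB installed, the 2GiB starting value is already aligned and is kept.
 */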
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;
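
/*
 * Sizing note: one PMD-level vmemmap block is PMD_SIZE (2MB). Assuming
 * the common 64-byte struct page, that is 32768 page structs, i.e. each
 * 2MB vmemmap block describes 128MB of mapped memory.
 */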
static int __meminit vmemmap_populate_hugepages(unsigned long start,
                                                unsigned long end, int node)
{
        unsigned long addr;
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        for (addr = start; addr < end; addr = next) {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        void *p;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (p) {
                                pte_t entry;

                                entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                                PAGE_KERNEL_LARGE);
                                set_pmd(pmd, __pmd(pte_val(entry)));

                                /* check to see if we have contiguous blocks */
                                if (p_end != p || node_start != node) {
                                        if (p_start)
                                                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                                                       addr_start, addr_end-1, p_start, p_end-1, node_start);
                                        addr_start = addr;
                                        node_start = node;
                                        p_start = p;
                                }

                                addr_end = addr + PMD_SIZE;
                                p_end = p + PMD_SIZE;
                                continue;
                        }
                } else if (pmd_large(*pmd)) {
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
                        continue;
                }
                pr_warn_once("vmemmap: falling back to regular page backing\n");
                if (vmemmap_populate_basepages(addr, next, node))
                        return -ENOMEM;
        }
        return 0;
}
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        int err;

        if (cpu_has_pse)
                err = vmemmap_populate_hugepages(start, end, node);
        else
                err = vmemmap_populate_basepages(start, end, node);
        if (!err)
                sync_global_pgds(start, end - 1, 0);
        return err;
}
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)
{
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + size);
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned int nr_pages;
        struct page *page;

        for (; addr < end; addr = next) {
                pte_t *pte = NULL;

                pgd = pgd_offset_k(addr);
                if (pgd_none(*pgd)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        continue;
                }
                get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

                pud = pud_offset(pgd, addr);
                if (pud_none(*pud)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        continue;
                }
                get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

                if (!cpu_has_pse) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        pmd = pmd_offset(pud, addr);
                        if (pmd_none(*pmd))
                                continue;
                        get_page_bootmem(section_nr, pmd_page(*pmd),
                                         MIX_SECTION_INFO);

                        pte = pte_offset_kernel(pmd, addr);
                        if (pte_none(*pte))
                                continue;
                        get_page_bootmem(section_nr, pte_page(*pte),
                                         SECTION_INFO);
                } else {
                        next = pmd_addr_end(addr, end);

                        pmd = pmd_offset(pud, addr);
                        if (pmd_none(*pmd))
                                continue;

                        nr_pages = 1 << (get_order(PMD_SIZE));
                        page = pmd_page(*pmd);
                        while (nr_pages--)
                                get_page_bootmem(section_nr, page++,
                                                 SECTION_INFO);
                }
        }
}
#endif
void __meminit vmemmap_populate_print_last(void)
{
        if (p_start) {
                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                        addr_start, addr_end-1, p_start, p_end-1, node_start);
                p_start = NULL;
                p_end = NULL;
                node_start = 0;
        }
}
#endif