/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

#include "mm_internal.h"

#include "ident_map.c"

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

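/*
 * Example (a reading of the handler above, not an additional code path):
 * booting with "noexec32=off" sets READ_IMPLIES_EXEC in force_personality32,
 * so a 32-bit process mapping a page with PROT_READ also receives PROT_EXEC;
 * "noexec32=on" clears the flag and restores the default behaviour.
 */
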
/*
 * When memory is added, make sure all the processes' MMs have
 * suitable PGD entries in the local PGD level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
	unsigned long address;

	for (address = start; address <= end; address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd)
				       != pgd_page_vaddr(*pgd_ref));

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

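/*
 * Within this file, kernel_physical_mapping_init() and vmemmap_populate()
 * call sync_global_pgds() after installing new PGD entries, so that
 * mappings created after boot become visible through every process's
 * kernel page-table portion.
 */
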
/*
 * NOTE: This function is marked __ref because it calls __init function
 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		pud_t *pud = (pud_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, pud);
		if (pud != pud_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       pud, pud_offset(pgd, 0));
	}
	return pud_offset(pgd, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = pud_page + pud_index(vaddr);
	pmd = fill_pmd(pud, vaddr);
	pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud_page = (pud_t*)pgd_page_vaddr(*pgd);
	set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

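/*
 * A minimal usage sketch (assumed caller, not taken from this file): the
 * fixmap code establishes a fixed virtual-to-physical translation with
 * something like
 *
 *	set_pte_vaddr(fix_to_virt(idx), pfn_pte(phys >> PAGE_SHIFT, flags));
 *
 * which walks (and, via fill_pmd()/fill_pte(), allocates) the intermediate
 * levels before writing the final PTE and flushing the single mapping.
 */
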
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	pud = fill_pud(pgd, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			pud = (pud_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(pgd, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

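/*
 * Both wrappers map a PMD-aligned physical range with 2MB pages; the only
 * difference is the cache mode: write-back for normal memory vs. uncached
 * for device-like regions (platform code such as the UV setup is an
 * assumed caller, not shown in this file).
 */
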
/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	pte_t *pte;
	int i;

	pte = pte_page + pte_index(paddr);
	i = pte_index(paddr);

	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_RAM) &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_RESERVED_KERN))
				set_pte(pte, __pte(0));
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume whoever pre-set up these
		 * mappings knew what they were doing.
		 */
		if (!pte_none(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr,
				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return paddr_last;
}

/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical address have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;

	int i = pmd_index(paddr);

	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
		pmd_t *pmd = pmd_page + pmd_index(paddr);
		pte_t *pte;
		pgprot_t new_prot = prot;

		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_RAM) &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_RESERVED_KERN))
				set_pmd(pmd, __pmd(0));
			continue;
		}

		if (!pmd_none(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				paddr_last = phys_pte_init(pte, paddr,
							   paddr_end, prot);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pte = alloc_low_page();
		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, pte);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return paddr_last;
}

/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical address do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = pud_index(vaddr);

	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud;
		pmd_t *pmd;
		pgprot_t prot = PAGE_KERNEL;

		vaddr = (unsigned long)__va(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_RAM) &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_RESERVED_KERN))
				set_pud(pud, __pud(0));
			continue;
		}

		if (!pud_none(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				paddr_last = phys_pmd_init(pmd, paddr,
							   paddr_end,
							   page_size_mask,
							   prot);
				__flush_tlb_all();
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
					PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pmd = alloc_low_page();
		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
					   page_size_mask, prot);

		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, pmd);
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();

	update_page_count(PG_LEVEL_1G, pages);

	return paddr_last;
}

/*
 * Create page table mapping for the physical memory for specific physical
 * addresses. The virtual and physical addresses have to be aligned on PMD level
 * down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask)
{
	bool pgd_changed = false;
	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr_start);
	vaddr_end = (unsigned long)__va(paddr_end);
	vaddr_start = vaddr;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		pud_t *pud;

		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			pud = (pud_t *)pgd_page_vaddr(*pgd);
			paddr_last = phys_pud_init(pud, __pa(vaddr),
						   __pa(vaddr_end),
						   page_size_mask);
			continue;
		}

		pud = alloc_low_page();
		paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
					   page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		pgd_populate(&init_mm, pgd, pud);
		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(vaddr_start, vaddr_end - 1);

	__flush_tlb_all();

	return paddr_last;
}

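/*
 * Overall shape of the direct-map setup above: kernel_physical_mapping_init()
 * walks PGD entries and descends through phys_pud_init() -> phys_pmd_init()
 * -> phys_pte_init(), with page_size_mask deciding at each level whether a
 * 1G or 2M leaf entry is used instead of recursing further; only 4K PTEs
 * remain when neither bit is set.
 */
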
#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/*
	 * Clear the default memory state for node 0.
	 * Note: don't use nodes_clear() here; it really clears the state
	 * when NUMA support is not compiled in, and a later node_set_state()
	 * will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	if (N_MEMORY != N_NORMAL_MEMORY)
		node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones +
		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, start + size);

	ret = __add_pages(nid, zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start, size);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

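/*
 * Note the ordering in arch_add_memory(): the direct mapping for the new
 * range is created first via init_memory_mapping(), then the pages are
 * handed to the core VM with __add_pages(), and finally max_pfn,
 * max_low_pfn and high_memory are adjusted to cover the new end of memory.
 */
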
#define PAGE_INUSE 0xFD

static void __meminit free_pagetable(struct page *page, int order)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;
	struct vmem_altmap *altmap = to_vmem_altmap((unsigned long) page);

	if (altmap) {
		vmem_altmap_free(altmap, nr_pages);
		return;
	}

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->lru.next;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			while (nr_pages--)
				free_reserved_page(page++);
	} else
		free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	void *page_addr;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
			/*
			 * Do not free direct mapping pages since they were
			 * freed when offlining, or are simply not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);
			spin_unlock(&init_mm.page_table_lock);

			/* For non-direct mapping, pages means nothing. */
			pages++;
		} else {
			/*
			 * If we are here, we are freeing vmemmap pages since
			 * direct mapped memory ranges to be freed are aligned.
			 *
			 * If we are not removing the whole page, it means
			 * other page structs in this page are being used and
			 * we cannot remove them. So fill the unused
			 * page_structs with 0xFD, and remove the page when it
			 * is wholly filled with 0xFD.
			 */
			memset((void *)addr, PAGE_INUSE, next - addr);

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);
				spin_unlock(&init_mm.page_table_lock);
			}
		}
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;
	void *page_addr;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pmd_page(*pmd));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PMD_SIZE)) {
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pmd_clear(pmd);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;
	void *page_addr;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				if (!direct)
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pud_clear(pud);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pud_page(*pud));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PUD_SIZE)) {
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pud_clear(pud);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next, direct);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	pud_t *pud;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		pud = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud, addr, next, direct);
	}

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end, false);
}

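/*
 * The "direct" flag selects between the two teardown cases above:
 * direct == true for the direct (identity) mapping of hot-removed RAM,
 * where only page-table pages are freed and the direct-map page counts
 * are adjusted, and direct == false for vmemmap ranges, where the backing
 * pages themselves are freed as well.
 */
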
#ifdef CONFIG_MEMORY_HOTREMOVE
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true);
}

int __ref arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);
	struct vmem_altmap *altmap;
	struct zone *zone;
	int ret;

	/* With altmap the first mapped page is offset from @start */
	altmap = to_vmem_altmap((unsigned long) page);
	if (altmap)
		page += vmem_altmap_offset(altmap);
	zone = page_zone(page);
	ret = __remove_pages(zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);
	kernel_physical_mapping_remove(start, start + size);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	register_page_bootmem_info();

	/* this will put all memory onto the freelists */
	free_all_bootmem();
	after_bootmem = 1;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
			 PAGE_SIZE, KCORE_OTHER);

	mem_init_print_info(NULL);
}

const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, end);

	/*
	 * Make the kernel identity mapping for text RW. Kernel text
	 * mapping will always be RO. Refer to the comment in
	 * static_protections() in pageattr.c
	 */
	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, end);

	/*
	 * Set the kernel identity mapping for text RO.
	 */
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * split the PMD and the remainder between _brk_end and the end
	 * of the PMD will remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(text_end)),
			(unsigned long) __va(__pa_symbol(rodata_start)));
	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(rodata_end)),
			(unsigned long) __va(__pa_symbol(_sdata)));

	debug_checkwx();
}

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

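/*
 * The "above" check in kern_addr_valid() is the canonical-address test:
 * shifting right by __VIRTUAL_MASK_SHIFT leaves only the sign-extension
 * bits, which must be all zeroes or all ones for a valid x86-64 virtual
 * address. Everything else is rejected before any page-table walk is
 * attempted.
 */
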
static unsigned long probe_memory_block_size(void)
{
	unsigned long bz = MIN_MEMORY_BLOCK_SIZE;

	/* if system is UV or has 64GB of RAM or more, use large blocks */
	if (is_uv_system() || ((max_pfn << PAGE_SHIFT) >= (64UL << 30)))
		bz = 2UL << 30; /* 2GB */

	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

	return bz;
}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}

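/*
 * memory_block_size_bytes() is the granularity at which the core memory
 * hotplug code carves RAM into blocks (visible, on a typical configuration,
 * as /sys/devices/system/memory/memoryN); the probed value is cached so
 * that the UV/64GB heuristic above runs only once.
 */
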
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

static int __meminit vmemmap_populate_hugepages(unsigned long start,
		unsigned long end, int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			p = __vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
			if (p) {
				pte_t entry;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
				continue;
			} else if (altmap)
				return -ENOMEM; /* no fallback */
		} else if (pmd_large(*pmd)) {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			continue;
		}
		pr_warn_once("vmemmap: falling back to regular page backing\n");
		if (vmemmap_populate_basepages(addr, next, node))
			return -ENOMEM;
	}
	return 0;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	struct vmem_altmap *altmap = to_vmem_altmap(start);
	int err;

	if (boot_cpu_has(X86_FEATURE_PSE))
		err = vmemmap_populate_hugepages(start, end, node, altmap);
	else if (altmap) {
		pr_err_once("%s: no cpu support for altmap allocations\n",
				__func__);
		err = -ENOMEM;
	} else
		err = vmemmap_populate_basepages(start, end, node);
	if (!err)
		sync_global_pgds(start, end - 1);
	return err;
}

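/*
 * Three population paths, as the code above shows: with PSE the vmemmap is
 * backed by 2MB pages (with an optional altmap supplying the backing
 * storage); an altmap without PSE fails, since only the hugepage path
 * supports it here; otherwise the mapping falls back to 4K base pages.
 */
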
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (!boot_cpu_has(X86_FEATURE_PSE)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pages = 1 << (get_order(PMD_SIZE));
			page = pmd_page(*pmd);
			while (nr_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif