/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/set_memory.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

#include "mm_internal.h"

#include "ident_map.c"

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control non executable heap for 32bit processes.
 * To control the stack too use noexec=off
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
    if (!strcmp(str, "on"))
        force_personality32 &= ~READ_IMPLIES_EXEC;
    else if (!strcmp(str, "off"))
        force_personality32 |= READ_IMPLIES_EXEC;
    return 1;
}
__setup("noexec32=", nonx32_setup);

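/*
 * Illustrative note (not in the original file): force_personality32 is
 * OR-ed into the personality of 32-bit tasks at exec time.  A minimal
 * sketch of what booting with "noexec32=off" implies for a 32-bit
 * process, assuming the usual mmap() prototype from <sys/mman.h>:
 *
 *	// READ_IMPLIES_EXEC set: a PROT_READ mapping is executable too.
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	// With noexec32=on (the default) jumping into p would fault;
 *	// with noexec32=off the kernel grants PROT_EXEC implicitly.
 */
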
static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
    unsigned long addr;

    for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
        const pgd_t *pgd_ref = pgd_offset_k(addr);
        struct page *page;

        /* Check for overflow */
        if (addr < start)
            break;

        if (pgd_none(*pgd_ref))
            continue;

        spin_lock(&pgd_lock);
        list_for_each_entry(page, &pgd_list, lru) {
            pgd_t *pgd;
            spinlock_t *pgt_lock;

            pgd = (pgd_t *)page_address(page) + pgd_index(addr);
            /* the pgt_lock only for Xen */
            pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
            spin_lock(pgt_lock);

            if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

            if (pgd_none(*pgd))
                set_pgd(pgd, *pgd_ref);

            spin_unlock(pgt_lock);
        }
        spin_unlock(&pgd_lock);
    }
}

static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
    unsigned long addr;

    for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
        pgd_t *pgd_ref = pgd_offset_k(addr);
        const p4d_t *p4d_ref;
        struct page *page;

        /*
         * With folded p4d, pgd_none() is always false; we need to
         * handle synchronization on the p4d level.
         */
        MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
        p4d_ref = p4d_offset(pgd_ref, addr);

        if (p4d_none(*p4d_ref))
            continue;

        spin_lock(&pgd_lock);
        list_for_each_entry(page, &pgd_list, lru) {
            pgd_t *pgd;
            p4d_t *p4d;
            spinlock_t *pgt_lock;

            pgd = (pgd_t *)page_address(page) + pgd_index(addr);
            p4d = p4d_offset(pgd, addr);
            /* the pgt_lock only for Xen */
            pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
            spin_lock(pgt_lock);

            if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
                BUG_ON(p4d_page_vaddr(*p4d)
                       != p4d_page_vaddr(*p4d_ref));

            if (p4d_none(*p4d))
                set_p4d(p4d, *p4d_ref);

            spin_unlock(pgt_lock);
        }
        spin_unlock(&pgd_lock);
    }
}

/*
 * When memory was added, make sure all the process MMs have
 * suitable PGD entries in the local PGD level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
    if (pgtable_l5_enabled())
        sync_global_pgds_l5(start, end);
    else
        sync_global_pgds_l4(start, end);
}

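/*
 * Illustrative note (not in the original file): the walkers above step
 * one top-level slot at a time via ALIGN(addr + 1, PGDIR_SIZE).  With
 * 4-level paging PGDIR_SIZE is 512 GiB, so a [start, end] span that
 * crosses one 512 GiB boundary is visited in exactly two iterations.
 * The "addr < start" test in the l5 walker catches ALIGN() wrapping
 * past the top of the address space, which would otherwise loop forever.
 */
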
/*
 * NOTE: This function is marked __ref because it calls __init code
 * (memblock_alloc()). It's safe to do it ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
    void *ptr;

    if (after_bootmem)
        ptr = (void *) get_zeroed_page(GFP_ATOMIC);
    else
        ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

    if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
        panic("set_pte_phys: cannot allocate page data %s\n",
              after_bootmem ? "after bootmem" : "");
    }

    pr_debug("spp_getpage %p\n", ptr);

    return ptr;
}

static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
    if (pgd_none(*pgd)) {
        p4d_t *p4d = (p4d_t *)spp_getpage();
        pgd_populate(&init_mm, pgd, p4d);
        if (p4d != p4d_offset(pgd, 0))
            printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
                   p4d, p4d_offset(pgd, 0));
    }
    return p4d_offset(pgd, vaddr);
}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
    if (p4d_none(*p4d)) {
        pud_t *pud = (pud_t *)spp_getpage();
        p4d_populate(&init_mm, p4d, pud);
        if (pud != pud_offset(p4d, 0))
            printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                   pud, pud_offset(p4d, 0));
    }
    return pud_offset(p4d, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
    if (pud_none(*pud)) {
        pmd_t *pmd = (pmd_t *) spp_getpage();
        pud_populate(&init_mm, pud, pmd);
        if (pmd != pmd_offset(pud, 0))
            printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
                   pmd, pmd_offset(pud, 0));
    }
    return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
    if (pmd_none(*pmd)) {
        pte_t *pte = (pte_t *) spp_getpage();
        pmd_populate_kernel(&init_mm, pmd, pte);
        if (pte != pte_offset_kernel(pmd, 0))
            printk(KERN_ERR "PAGETABLE BUG #03!\n");
    }
    return pte_offset_kernel(pmd, vaddr);
}

static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
    pmd_t *pmd = fill_pmd(pud, vaddr);
    pte_t *pte = fill_pte(pmd, vaddr);

    set_pte(pte, new_pte);

    /*
     * It's enough to flush this one mapping.
     * (PGE mappings get flushed as well)
     */
    __flush_tlb_one_kernel(vaddr);
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
    p4d_t *p4d = p4d_page + p4d_index(vaddr);
    pud_t *pud = fill_pud(p4d, vaddr);

    __set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
    pud_t *pud = pud_page + pud_index(vaddr);

    __set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
    pgd_t *pgd;
    p4d_t *p4d_page;

    pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

    pgd = pgd_offset_k(vaddr);
    if (pgd_none(*pgd)) {
        printk(KERN_ERR
               "PGD FIXMAP MISSING, it should be setup in head.S!\n");
        return;
    }

    p4d_page = p4d_offset(pgd, 0);
    set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}

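/*
 * Illustrative sketch (not in the original file): set_pte_vaddr() is the
 * backend used when a single kernel virtual page needs a mapping, e.g.
 * the fixmap.  Something like the native __set_fixmap() path boils down
 * to the following (example_map_fixmap is a hypothetical name):
 *
 *	void example_map_fixmap(enum fixed_addresses idx, phys_addr_t phys)
 *	{
 *		unsigned long vaddr = __fix_to_virt(idx);
 *
 *		set_pte_vaddr(vaddr, pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL));
 *	}
 */
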
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
    pgd_t *pgd;
    p4d_t *p4d;
    pud_t *pud;

    pgd = pgd_offset_k(vaddr);
    p4d = fill_p4d(pgd, vaddr);
    pud = fill_pud(p4d, vaddr);
    return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
    pmd_t *pmd;

    pmd = populate_extra_pmd(vaddr);
    return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
                                        enum page_cache_mode cache)
{
    pgd_t *pgd;
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;
    pgprot_t prot;

    pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
        pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
    BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
    for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
        pgd = pgd_offset_k((unsigned long)__va(phys));
        if (pgd_none(*pgd)) {
            p4d = (p4d_t *) spp_getpage();
            set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
                               _PAGE_USER));
        }
        p4d = p4d_offset(pgd, (unsigned long)__va(phys));
        if (p4d_none(*p4d)) {
            pud = (pud_t *) spp_getpage();
            set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
                               _PAGE_USER));
        }
        pud = pud_offset(p4d, (unsigned long)__va(phys));
        if (pud_none(*pud)) {
            pmd = (pmd_t *) spp_getpage();
            set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
                               _PAGE_USER));
        }
        pmd = pmd_offset(pud, phys);
        BUG_ON(!pmd_none(*pmd));
        set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
    }
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
    __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
    __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

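/*
 * Illustrative note (not in the original file): these helpers create
 * early 2 MiB mappings of platform-specific physical ranges.  As an
 * assumption-flagged example, the SGI UV APIC code maps chipset MMIO
 * space with roughly:
 *
 *	init_extra_mapping_uc(mmr_base, PMD_SIZE);	// uncached MMRs
 *
 * where mmr_base is a hypothetical stand-in for the hardware's MMR
 * window base address.
 */
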
/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end - _text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
    unsigned long vaddr = __START_KERNEL_map;
    unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
    unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
    pmd_t *pmd = level2_kernel_pgt;

    /*
     * Native path, max_pfn_mapped is not set yet.
     * Xen has valid max_pfn_mapped set in
     * arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
     */
    if (max_pfn_mapped)
        vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

    for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
        if (pmd_none(*pmd))
            continue;
        if (vaddr < (unsigned long) _text || vaddr > end)
            set_pmd(pmd, __pmd(0));
    }
}

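/*
 * Worked example (not in the original file): if _brk_end sits at
 * __START_KERNEL_map + 0x01234567, then
 *
 *	end = roundup(_brk_end, PMD_SIZE) - 1
 *	    = __START_KERNEL_map + 0x013fffff
 *
 * so the loop keeps every 2 MiB pmd from _text up to and including the
 * one covering _brk_end, and zaps everything else in level2_kernel_pgt.
 */
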
/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
              pgprot_t prot)
{
    unsigned long pages = 0, paddr_next;
    unsigned long paddr_last = paddr_end;
    pte_t *pte;
    int i;

    pte = pte_page + pte_index(paddr);
    i = pte_index(paddr);

    for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
        paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
        if (paddr >= paddr_end) {
            if (!after_bootmem &&
                !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
                                  E820_TYPE_RAM) &&
                !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
                                  E820_TYPE_RESERVED_KERN))
                set_pte(pte, __pte(0));
            continue;
        }

        /*
         * We will re-use the existing mapping.
         * Xen for example has some special requirements, like mapping
         * pagetable pages as RO. So assume that whoever pre-set up
         * these mappings knew what they were doing.
         */
        if (!pte_none(*pte)) {
            if (!after_bootmem)
                pages++;
            continue;
        }

        if (0)
            pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
                    pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
        pages++;
        set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
        paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
    }

    update_page_count(PG_LEVEL_4K, pages);

    return paddr_last;
}

/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical addresses have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
              unsigned long page_size_mask, pgprot_t prot)
{
    unsigned long pages = 0, paddr_next;
    unsigned long paddr_last = paddr_end;

    int i = pmd_index(paddr);

    for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
        pmd_t *pmd = pmd_page + pmd_index(paddr);
        pte_t *pte;
        pgprot_t new_prot = prot;

        paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
        if (paddr >= paddr_end) {
            if (!after_bootmem &&
                !e820__mapped_any(paddr & PMD_MASK, paddr_next,
                                  E820_TYPE_RAM) &&
                !e820__mapped_any(paddr & PMD_MASK, paddr_next,
                                  E820_TYPE_RESERVED_KERN))
                set_pmd(pmd, __pmd(0));
            continue;
        }

        if (!pmd_none(*pmd)) {
            if (!pmd_large(*pmd)) {
                spin_lock(&init_mm.page_table_lock);
                pte = (pte_t *)pmd_page_vaddr(*pmd);
                paddr_last = phys_pte_init(pte, paddr,
                                           paddr_end, prot);
                spin_unlock(&init_mm.page_table_lock);
                continue;
            }
            /*
             * If we are ok with PG_LEVEL_2M mapping, then we will
             * use the existing mapping.
             *
             * Otherwise, we will split the large page mapping but
             * use the same existing protection bits except for
             * large page, so that we don't violate Intel's TLB
             * Application note (317080) which says, while changing
             * the page sizes, new and old translations should
             * not differ with respect to page frame and
             * attributes.
             */
            if (page_size_mask & (1 << PG_LEVEL_2M)) {
                if (!after_bootmem)
                    pages++;
                paddr_last = paddr_next;
                continue;
            }
            new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
        }

        if (page_size_mask & (1<<PG_LEVEL_2M)) {
            pages++;
            spin_lock(&init_mm.page_table_lock);
            set_pte((pte_t *)pmd,
                    pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
                            __pgprot(pgprot_val(prot) | _PAGE_PSE)));
            spin_unlock(&init_mm.page_table_lock);
            paddr_last = paddr_next;
            continue;
        }

        pte = alloc_low_page();
        paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);

        spin_lock(&init_mm.page_table_lock);
        pmd_populate_kernel(&init_mm, pmd, pte);
        spin_unlock(&init_mm.page_table_lock);
    }
    update_page_count(PG_LEVEL_2M, pages);
    return paddr_last;
}

/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical address do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
              unsigned long page_size_mask)
{
    unsigned long pages = 0, paddr_next;
    unsigned long paddr_last = paddr_end;
    unsigned long vaddr = (unsigned long)__va(paddr);
    int i = pud_index(vaddr);

    for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
        pud_t *pud;
        pmd_t *pmd;
        pgprot_t prot = PAGE_KERNEL;

        vaddr = (unsigned long)__va(paddr);
        pud = pud_page + pud_index(vaddr);
        paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

        if (paddr >= paddr_end) {
            if (!after_bootmem &&
                !e820__mapped_any(paddr & PUD_MASK, paddr_next,
                                  E820_TYPE_RAM) &&
                !e820__mapped_any(paddr & PUD_MASK, paddr_next,
                                  E820_TYPE_RESERVED_KERN))
                set_pud(pud, __pud(0));
            continue;
        }

        if (!pud_none(*pud)) {
            if (!pud_large(*pud)) {
                pmd = pmd_offset(pud, 0);
                paddr_last = phys_pmd_init(pmd, paddr,
                                           paddr_end,
                                           page_size_mask,
                                           prot);
                __flush_tlb_all();
                continue;
            }
            /*
             * If we are ok with PG_LEVEL_1G mapping, then we will
             * use the existing mapping.
             *
             * Otherwise, we will split the gbpage mapping but use
             * the same existing protection bits except for large
             * page, so that we don't violate Intel's TLB
             * Application note (317080) which says, while changing
             * the page sizes, new and old translations should
             * not differ with respect to page frame and
             * attributes.
             */
            if (page_size_mask & (1 << PG_LEVEL_1G)) {
                if (!after_bootmem)
                    pages++;
                paddr_last = paddr_next;
                continue;
            }
            prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
        }

        if (page_size_mask & (1<<PG_LEVEL_1G)) {
            pages++;
            spin_lock(&init_mm.page_table_lock);
            set_pte((pte_t *)pud,
                    pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
                            PAGE_KERNEL_LARGE));
            spin_unlock(&init_mm.page_table_lock);
            paddr_last = paddr_next;
            continue;
        }

        pmd = alloc_low_page();
        paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
                                   page_size_mask, prot);

        spin_lock(&init_mm.page_table_lock);
        pud_populate(&init_mm, pud, pmd);
        spin_unlock(&init_mm.page_table_lock);
    }
    __flush_tlb_all();

    update_page_count(PG_LEVEL_1G, pages);

    return paddr_last;
}

static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
              unsigned long page_size_mask)
{
    unsigned long paddr_next, paddr_last = paddr_end;
    unsigned long vaddr = (unsigned long)__va(paddr);
    int i = p4d_index(vaddr);

    if (!pgtable_l5_enabled())
        return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);

    for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
        p4d_t *p4d;
        pud_t *pud;

        vaddr = (unsigned long)__va(paddr);
        p4d = p4d_page + p4d_index(vaddr);
        paddr_next = (paddr & P4D_MASK) + P4D_SIZE;

        if (paddr >= paddr_end) {
            if (!after_bootmem &&
                !e820__mapped_any(paddr & P4D_MASK, paddr_next,
                                  E820_TYPE_RAM) &&
                !e820__mapped_any(paddr & P4D_MASK, paddr_next,
                                  E820_TYPE_RESERVED_KERN))
                set_p4d(p4d, __p4d(0));
            continue;
        }

        if (!p4d_none(*p4d)) {
            pud = pud_offset(p4d, 0);
            paddr_last = phys_pud_init(pud, paddr,
                                       paddr_end,
                                       page_size_mask);
            __flush_tlb_all();
            continue;
        }

        pud = alloc_low_page();
        paddr_last = phys_pud_init(pud, paddr, paddr_end,
                                   page_size_mask);

        spin_lock(&init_mm.page_table_lock);
        p4d_populate(&init_mm, p4d, pud);
        spin_unlock(&init_mm.page_table_lock);
    }
    __flush_tlb_all();

    return paddr_last;
}

/*
 * Create page table mapping for the physical memory for specific physical
 * addresses. The virtual and physical addresses have to be aligned on PMD level
 * down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
                             unsigned long paddr_end,
                             unsigned long page_size_mask)
{
    bool pgd_changed = false;
    unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

    paddr_last = paddr_end;
    vaddr = (unsigned long)__va(paddr_start);
    vaddr_end = (unsigned long)__va(paddr_end);
    vaddr_start = vaddr;

    for (; vaddr < vaddr_end; vaddr = vaddr_next) {
        pgd_t *pgd = pgd_offset_k(vaddr);
        p4d_t *p4d;

        vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

        if (pgd_val(*pgd)) {
            p4d = (p4d_t *)pgd_page_vaddr(*pgd);
            paddr_last = phys_p4d_init(p4d, __pa(vaddr),
                                       __pa(vaddr_end),
                                       page_size_mask);
            continue;
        }

        p4d = alloc_low_page();
        paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
                                   page_size_mask);

        spin_lock(&init_mm.page_table_lock);
        if (pgtable_l5_enabled())
            pgd_populate(&init_mm, pgd, p4d);
        else
            p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
        spin_unlock(&init_mm.page_table_lock);
        pgd_changed = true;
    }

    if (pgd_changed)
        sync_global_pgds(vaddr_start, vaddr_end - 1);

    __flush_tlb_all();

    return paddr_last;
}

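/*
 * Illustrative sketch (not in the original file): the common caller is
 * init_memory_mapping(), which splits a [start, end) physical range into
 * naturally aligned pieces and derives page_size_mask from CPU features.
 * Roughly, and only as an assumption-flagged example:
 *
 *	unsigned long mask = 0;
 *
 *	if (boot_cpu_has(X86_FEATURE_PSE))
 *		mask |= 1 << PG_LEVEL_2M;
 *	if (boot_cpu_has(X86_FEATURE_GBPAGES))
 *		mask |= 1 << PG_LEVEL_1G;
 *	kernel_physical_mapping_init(0, 1UL << 30, mask);  // map first 1 GiB
 */
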
#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
    memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
    sparse_memory_present_with_active_regions(MAX_NUMNODES);
    sparse_init();

    /*
     * clear the default setting with node 0
     * note: don't use nodes_clear here, that is really clearing when
     *	 numa support is not compiled in, and later node_set_state
     *	 will not set it back.
     */
    node_clear_state(0, N_MEMORY);
    if (N_MEMORY != N_NORMAL_MEMORY)
        node_clear_state(0, N_NORMAL_MEMORY);

    zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
    unsigned long end_pfn = PFN_UP(start + size);

    if (end_pfn > max_pfn) {
        max_pfn = end_pfn;
        max_low_pfn = end_pfn;
        high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
    }
}

int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
              struct vmem_altmap *altmap, bool want_memblock)
{
    int ret;

    ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
    WARN_ON_ONCE(ret);

    /* update max_pfn, max_low_pfn and high_memory */
    update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
                              nr_pages << PAGE_SHIFT);

    return ret;
}

int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
                    bool want_memblock)
{
    unsigned long start_pfn = start >> PAGE_SHIFT;
    unsigned long nr_pages = size >> PAGE_SHIFT;

    init_memory_mapping(start, start + size);

    return add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#define PAGE_INUSE 0xFD

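/*
 * Illustrative note (not in the original file): PAGE_INUSE is a poison
 * byte used when tearing down vmemmap.  If only part of a page of
 * struct pages dies, the dead range is memset() to 0xFD; the backing
 * page is freed only once memchr_inv(page, 0xFD, PAGE_SIZE) reports
 * that every byte carries the poison, i.e. no live struct page remains.
 */
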
static void __meminit free_pagetable(struct page *page, int order)
{
    unsigned long magic;
    unsigned int nr_pages = 1 << order;

    /* bootmem page has reserved flag */
    if (PageReserved(page)) {
        __ClearPageReserved(page);

        magic = (unsigned long)page->freelist;
        if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
            while (nr_pages--)
                put_page_bootmem(page++);
        } else
            while (nr_pages--)
                free_reserved_page(page++);
    } else
        free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_hugepage_table(struct page *page,
                                          struct vmem_altmap *altmap)
{
    if (altmap)
        vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
    else
        free_pagetable(page, get_order(PMD_SIZE));
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
    pte_t *pte;
    int i;

    for (i = 0; i < PTRS_PER_PTE; i++) {
        pte = pte_start + i;
        if (!pte_none(*pte))
            return;
    }

    /* free a pte table */
    free_pagetable(pmd_page(*pmd), 0);
    spin_lock(&init_mm.page_table_lock);
    pmd_clear(pmd);
    spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
    pmd_t *pmd;
    int i;

    for (i = 0; i < PTRS_PER_PMD; i++) {
        pmd = pmd_start + i;
        if (!pmd_none(*pmd))
            return;
    }

    /* free a pmd table */
    free_pagetable(pud_page(*pud), 0);
    spin_lock(&init_mm.page_table_lock);
    pud_clear(pud);
    spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
    pud_t *pud;
    int i;

    for (i = 0; i < PTRS_PER_PUD; i++) {
        pud = pud_start + i;
        if (!pud_none(*pud))
            return;
    }

    /* free a pud table */
    free_pagetable(p4d_page(*p4d), 0);
    spin_lock(&init_mm.page_table_lock);
    p4d_clear(p4d);
    spin_unlock(&init_mm.page_table_lock);
}

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
                 bool direct)
{
    unsigned long next, pages = 0;
    pte_t *pte;
    void *page_addr;
    phys_addr_t phys_addr;

    pte = pte_start + pte_index(addr);
    for (; addr < end; addr = next, pte++) {
        next = (addr + PAGE_SIZE) & PAGE_MASK;
        if (next > end)
            next = end;

        if (!pte_present(*pte))
            continue;

        /*
         * We mapped [0,1G) memory as identity mapping when
         * initializing, in arch/x86/kernel/head_64.S. These
         * pagetables cannot be removed.
         */
        phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
        if (phys_addr < (phys_addr_t)0x40000000)
            return;

        if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
            /*
             * Do not free direct mapping pages since they were
             * freed when offlining, or simply not in use.
             */
            if (!direct)
                free_pagetable(pte_page(*pte), 0);

            spin_lock(&init_mm.page_table_lock);
            pte_clear(&init_mm, addr, pte);
            spin_unlock(&init_mm.page_table_lock);

            /* For non-direct mapping, pages means nothing. */
            pages++;
        } else {
            /*
             * If we are here, we are freeing vmemmap pages since
             * direct mapped memory ranges to be freed are aligned.
             *
             * If we are not removing the whole page, it means
             * other page structs in this page are being used and
             * we cannot remove them. So fill the unused page_structs
             * with 0xFD, and remove the page when it is wholly
             * filled with 0xFD.
             */
            memset((void *)addr, PAGE_INUSE, next - addr);

            page_addr = page_address(pte_page(*pte));
            if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
                free_pagetable(pte_page(*pte), 0);

                spin_lock(&init_mm.page_table_lock);
                pte_clear(&init_mm, addr, pte);
                spin_unlock(&init_mm.page_table_lock);
            }
        }
    }

    /* Call free_pte_table() in remove_pmd_table(). */
    flush_tlb_all();
    if (direct)
        update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                 bool direct, struct vmem_altmap *altmap)
{
    unsigned long next, pages = 0;
    pte_t *pte_base;
    pmd_t *pmd;
    void *page_addr;

    pmd = pmd_start + pmd_index(addr);
    for (; addr < end; addr = next, pmd++) {
        next = pmd_addr_end(addr, end);

        if (!pmd_present(*pmd))
            continue;

        if (pmd_large(*pmd)) {
            if (IS_ALIGNED(addr, PMD_SIZE) &&
                IS_ALIGNED(next, PMD_SIZE)) {
                if (!direct)
                    free_hugepage_table(pmd_page(*pmd),
                                        altmap);

                spin_lock(&init_mm.page_table_lock);
                pmd_clear(pmd);
                spin_unlock(&init_mm.page_table_lock);
                pages++;
            } else {
                /* If here, we are freeing vmemmap pages. */
                memset((void *)addr, PAGE_INUSE, next - addr);

                page_addr = page_address(pmd_page(*pmd));
                if (!memchr_inv(page_addr, PAGE_INUSE,
                                PMD_SIZE)) {
                    free_hugepage_table(pmd_page(*pmd),
                                        altmap);

                    spin_lock(&init_mm.page_table_lock);
                    pmd_clear(pmd);
                    spin_unlock(&init_mm.page_table_lock);
                }
            }

            continue;
        }

        pte_base = (pte_t *)pmd_page_vaddr(*pmd);
        remove_pte_table(pte_base, addr, next, direct);
        free_pte_table(pte_base, pmd);
    }

    /* Call free_pmd_table() in remove_pud_table(). */
    if (direct)
        update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                 struct vmem_altmap *altmap, bool direct)
{
    unsigned long next, pages = 0;
    pmd_t *pmd_base;
    pud_t *pud;
    void *page_addr;

    pud = pud_start + pud_index(addr);
    for (; addr < end; addr = next, pud++) {
        next = pud_addr_end(addr, end);

        if (!pud_present(*pud))
            continue;

        if (pud_large(*pud)) {
            if (IS_ALIGNED(addr, PUD_SIZE) &&
                IS_ALIGNED(next, PUD_SIZE)) {
                if (!direct)
                    free_pagetable(pud_page(*pud),
                                   get_order(PUD_SIZE));

                spin_lock(&init_mm.page_table_lock);
                pud_clear(pud);
                spin_unlock(&init_mm.page_table_lock);
                pages++;
            } else {
                /* If here, we are freeing vmemmap pages. */
                memset((void *)addr, PAGE_INUSE, next - addr);

                page_addr = page_address(pud_page(*pud));
                if (!memchr_inv(page_addr, PAGE_INUSE,
                                PUD_SIZE)) {
                    free_pagetable(pud_page(*pud),
                                   get_order(PUD_SIZE));

                    spin_lock(&init_mm.page_table_lock);
                    pud_clear(pud);
                    spin_unlock(&init_mm.page_table_lock);
                }
            }

            continue;
        }

        pmd_base = pmd_offset(pud, 0);
        remove_pmd_table(pmd_base, addr, next, direct, altmap);
        free_pmd_table(pmd_base, pud);
    }

    if (direct)
        update_page_count(PG_LEVEL_1G, -pages);
}

static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
                 struct vmem_altmap *altmap, bool direct)
{
    unsigned long next, pages = 0;
    pud_t *pud_base;
    p4d_t *p4d;

    p4d = p4d_start + p4d_index(addr);
    for (; addr < end; addr = next, p4d++) {
        next = p4d_addr_end(addr, end);

        if (!p4d_present(*p4d))
            continue;

        BUILD_BUG_ON(p4d_large(*p4d));

        pud_base = pud_offset(p4d, 0);
        remove_pud_table(pud_base, addr, next, altmap, direct);
        /*
         * For 4-level page tables we do not want to free PUDs, but in the
         * 5-level case we should free them. This code will have to change
         * to adapt for boot-time switching between 4 and 5 level page tables.
         */
        if (pgtable_l5_enabled())
            free_pud_table(pud_base, p4d);
    }

    if (direct)
        update_page_count(PG_LEVEL_512G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
                 struct vmem_altmap *altmap)
{
    unsigned long next;
    unsigned long addr;
    pgd_t *pgd;
    p4d_t *p4d;

    for (addr = start; addr < end; addr = next) {
        next = pgd_addr_end(addr, end);

        pgd = pgd_offset_k(addr);
        if (!pgd_present(*pgd))
            continue;

        p4d = p4d_offset(pgd, 0);
        remove_p4d_table(p4d, addr, next, altmap, direct);
    }

    flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
                        struct vmem_altmap *altmap)
{
    remove_pagetable(start, end, false, altmap);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
    start = (unsigned long)__va(start);
    end = (unsigned long)__va(end);

    remove_pagetable(start, end, true, NULL);
}

int __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
    unsigned long start_pfn = start >> PAGE_SHIFT;
    unsigned long nr_pages = size >> PAGE_SHIFT;
    struct page *page = pfn_to_page(start_pfn);
    struct zone *zone;
    int ret;

    /* With altmap the first mapped page is offset from @start */
    if (altmap)
        page += vmem_altmap_offset(altmap);
    zone = page_zone(page);
    ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
    WARN_ON_ONCE(ret);
    kernel_physical_mapping_remove(start, start + size);

    return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
    int i;

    for_each_online_node(i)
        register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
    pci_iommu_alloc();

    /* clear_bss() already cleared the empty_zero_page */

    /* this will put all memory onto the freelists */
    memblock_free_all();
    after_bootmem = 1;
    x86_init.hyper.init_after_bootmem();

    /*
     * Must be done after boot memory is put on freelist, because here we
     * might set fields in deferred struct pages that have not yet been
     * initialized, and memblock_free_all() initializes all the reserved
     * deferred pages for us.
     */
    register_page_bootmem_info();

    /* Register memory areas for /proc/kcore */
    if (get_gate_vma(&init_mm))
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);

    mem_init_print_info(NULL);
}

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
    unsigned long start = PFN_ALIGN(_text);
    unsigned long end = PFN_ALIGN(__stop___ex_table);

    if (!kernel_set_to_readonly)
        return;

    pr_debug("Set kernel text: %lx - %lx for read write\n",
             start, end);

    /*
     * Make the kernel identity mapping for text RW. Kernel text
     * mapping will always be RO. Refer to the comment in
     * static_protections() in pageattr.c
     */
    set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
    unsigned long start = PFN_ALIGN(_text);
    unsigned long end = PFN_ALIGN(__stop___ex_table);

    if (!kernel_set_to_readonly)
        return;

    pr_debug("Set kernel text: %lx - %lx for read only\n",
             start, end);

    /*
     * Set the kernel identity mapping for text RO.
     */
    set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
    unsigned long start = PFN_ALIGN(_text);
    unsigned long rodata_start = PFN_ALIGN(__start_rodata);
    unsigned long end = (unsigned long) &__end_rodata_hpage_align;
    unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
    unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
    unsigned long all_end;

    printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
           (end - start) >> 10);
    set_memory_ro(start, (end - start) >> PAGE_SHIFT);

    kernel_set_to_readonly = 1;

    /*
     * The rodata/data/bss/brk section (but not the kernel text!)
     * should also be not-executable.
     *
     * We align all_end to PMD_SIZE because the existing mapping
     * is a full PMD. If we would align _brk_end to PAGE_SIZE we
     * split the PMD and the remainder between _brk_end and the end
     * of the PMD will remain mapped executable.
     *
     * Any PMD which was setup after the one which covers _brk_end
     * has been zapped already via cleanup_highmap().
     */
    all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
    set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

#ifdef CONFIG_CPA_DEBUG
    printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
    set_memory_rw(start, (end-start) >> PAGE_SHIFT);

    printk(KERN_INFO "Testing CPA: again\n");
    set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

    free_kernel_image_pages((void *)text_end, (void *)rodata_start);
    free_kernel_image_pages((void *)rodata_end, (void *)_sdata);

    debug_checkwx();
}

int kern_addr_valid(unsigned long addr)
{
    unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
    pgd_t *pgd;
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;

    if (above != 0 && above != -1UL)
        return 0;

    pgd = pgd_offset_k(addr);
    if (pgd_none(*pgd))
        return 0;

    p4d = p4d_offset(pgd, addr);
    if (p4d_none(*p4d))
        return 0;

    pud = pud_offset(p4d, addr);
    if (pud_none(*pud))
        return 0;

    if (pud_large(*pud))
        return pfn_valid(pud_pfn(*pud));

    pmd = pmd_offset(pud, addr);
    if (pmd_none(*pmd))
        return 0;

    if (pmd_large(*pmd))
        return pfn_valid(pmd_pfn(*pmd));

    pte = pte_offset_kernel(pmd, addr);
    if (pte_none(*pte))
        return 0;

    return pfn_valid(pte_pfn(*pte));
}

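/*
 * Worked example (not in the original file): the "above" test rejects
 * non-canonical addresses.  With 4-level paging, __VIRTUAL_MASK_SHIFT
 * is 47, so for addr = 0xffff888000000000 the arithmetic shift
 * ((long)addr) >> 47 sign-extends to -1UL and the page-table walk
 * proceeds; for a non-canonical addr such as 0x0000900000000000 the
 * shift yields 1, and kern_addr_valid() returns 0 without touching
 * any page tables.
 */
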
/*
 * Block size is the minimum amount of memory which can be hotplugged or
 * hotremoved. It must be a power of two and must be equal to or larger than
 * MIN_MEMORY_BLOCK_SIZE.
 */
#define MAX_BLOCK_SIZE (2UL << 30)

/* Amount of ram needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)

/* Adjustable memory block size */
static unsigned long set_memory_block_size;
int __init set_memory_block_size_order(unsigned int order)
{
    unsigned long size = 1UL << order;

    if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
        return -EINVAL;

    set_memory_block_size = size;
    return 0;
}

static unsigned long probe_memory_block_size(void)
{
    unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
    unsigned long bz;

    /* If memory block size has been set, then use it */
    bz = set_memory_block_size;
    if (bz)
        goto done;

    /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
    if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
        bz = MIN_MEMORY_BLOCK_SIZE;
        goto done;
    }

    /* Find the largest allowed block size that aligns to memory end */
    for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
        if (IS_ALIGNED(boot_mem_end, bz))
            break;
    }
done:
    pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

    return bz;
}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
    if (!memory_block_size_probed)
        memory_block_size_probed = probe_memory_block_size();

    return memory_block_size_probed;
}

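/*
 * Worked example (not in the original file): on a machine whose usable
 * RAM ends at 0x1100000000 (68 GiB), boot_mem_end exceeds
 * MEM_SIZE_FOR_LARGE_BLOCK, so the loop starts at MAX_BLOCK_SIZE:
 *
 *	IS_ALIGNED(0x1100000000, 2UL << 30)	// true: 68 GiB % 2 GiB == 0
 *
 * giving 2 GiB blocks.  Had memory ended at 0x10f8000000 (67.875 GiB),
 * the 2 GiB, 1 GiB, 512 MiB and 256 MiB alignment tests would all fail
 * and the loop would fall through to MIN_MEMORY_BLOCK_SIZE (128 MiB on
 * x86-64).
 */
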
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

static int __meminit vmemmap_populate_hugepages(unsigned long start,
                unsigned long end, int node, struct vmem_altmap *altmap)
{
    unsigned long addr;
    unsigned long next;
    pgd_t *pgd;
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;

    for (addr = start; addr < end; addr = next) {
        next = pmd_addr_end(addr, end);

        pgd = vmemmap_pgd_populate(addr, node);
        if (!pgd)
            return -ENOMEM;

        p4d = vmemmap_p4d_populate(pgd, addr, node);
        if (!p4d)
            return -ENOMEM;

        pud = vmemmap_pud_populate(p4d, addr, node);
        if (!pud)
            return -ENOMEM;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd)) {
            void *p;

            if (altmap)
                p = altmap_alloc_block_buf(PMD_SIZE, altmap);
            else
                p = vmemmap_alloc_block_buf(PMD_SIZE, node);
            if (p) {
                pte_t entry;

                entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                PAGE_KERNEL_LARGE);
                set_pmd(pmd, __pmd(pte_val(entry)));

                /* check to see if we have contiguous blocks */
                if (p_end != p || node_start != node) {
                    if (p_start)
                        pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                                 addr_start, addr_end-1, p_start, p_end-1, node_start);
                    addr_start = addr;
                    node_start = node;
                    p_start = p;
                }

                addr_end = addr + PMD_SIZE;
                p_end = p + PMD_SIZE;
                continue;
            } else if (altmap)
                return -ENOMEM; /* no fallback */
        } else if (pmd_large(*pmd)) {
            vmemmap_verify((pte_t *)pmd, node, addr, next);
            continue;
        }
        if (vmemmap_populate_basepages(addr, next, node))
            return -ENOMEM;
    }
    return 0;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
{
    int err;

    if (boot_cpu_has(X86_FEATURE_PSE))
        err = vmemmap_populate_hugepages(start, end, node, altmap);
    else if (altmap) {
        pr_err_once("%s: no cpu support for altmap allocations\n",
                    __func__);
        err = -ENOMEM;
    } else
        err = vmemmap_populate_basepages(start, end, node);
    if (!err)
        sync_global_pgds(start, end - 1);
    return err;
}

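/*
 * Worked example (not in the original file), assuming a 64-byte
 * struct page and 128 MiB sparsemem sections: one section holds
 * 128 MiB / 4 KiB = 32768 pages, whose struct pages occupy
 * 32768 * 64 = 2 MiB of vmemmap -- exactly one PMD-sized huge page,
 * which is why the hugepage path above allocates PMD_SIZE buffers.
 */
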
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long nr_pages)
{
    unsigned long addr = (unsigned long)start_page;
    unsigned long end = (unsigned long)(start_page + nr_pages);
    unsigned long next;
    pgd_t *pgd;
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;
    unsigned int nr_pmd_pages;
    struct page *page;

    for (; addr < end; addr = next) {
        pte_t *pte = NULL;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
            next = (addr + PAGE_SIZE) & PAGE_MASK;
            continue;
        }
        get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d)) {
            next = (addr + PAGE_SIZE) & PAGE_MASK;
            continue;
        }
        get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);

        pud = pud_offset(p4d, addr);
        if (pud_none(*pud)) {
            next = (addr + PAGE_SIZE) & PAGE_MASK;
            continue;
        }
        get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

        if (!boot_cpu_has(X86_FEATURE_PSE)) {
            next = (addr + PAGE_SIZE) & PAGE_MASK;
            pmd = pmd_offset(pud, addr);
            if (pmd_none(*pmd))
                continue;
            get_page_bootmem(section_nr, pmd_page(*pmd),
                             MIX_SECTION_INFO);

            pte = pte_offset_kernel(pmd, addr);
            if (pte_none(*pte))
                continue;
            get_page_bootmem(section_nr, pte_page(*pte),
                             SECTION_INFO);
        } else {
            next = pmd_addr_end(addr, end);

            pmd = pmd_offset(pud, addr);
            if (pmd_none(*pmd))
                continue;

            nr_pmd_pages = 1 << get_order(PMD_SIZE);
            page = pmd_page(*pmd);
            while (nr_pmd_pages--)
                get_page_bootmem(section_nr, page++,
                                 SECTION_INFO);
        }
    }
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
    if (p_start) {
        pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                 addr_start, addr_end-1, p_start, p_end-1, node_start);
        p_start = NULL;
        p_end = NULL;
        node_start = 0;
    }
}
#endif