Commit | Line | Data |
---|---|---|
457c8996 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
1da177e4 LT |
2 | /* |
3 | * linux/arch/x86_64/mm/init.c | |
4 | * | |
5 | * Copyright (C) 1995 Linus Torvalds | |
a2531293 | 6 | * Copyright (C) 2000 Pavel Machek <pavel@ucw.cz> |
1da177e4 LT |
7 | * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de> |
8 | */ | |
9 | ||
1da177e4 LT |
10 | #include <linux/signal.h> |
11 | #include <linux/sched.h> | |
12 | #include <linux/kernel.h> | |
13 | #include <linux/errno.h> | |
14 | #include <linux/string.h> | |
15 | #include <linux/types.h> | |
16 | #include <linux/ptrace.h> | |
17 | #include <linux/mman.h> | |
18 | #include <linux/mm.h> | |
19 | #include <linux/swap.h> | |
20 | #include <linux/smp.h> | |
21 | #include <linux/init.h> | |
11034d55 | 22 | #include <linux/initrd.h> |
1da177e4 | 23 | #include <linux/pagemap.h> |
a9ce6bc1 | 24 | #include <linux/memblock.h> |
1da177e4 | 25 | #include <linux/proc_fs.h> |
59170891 | 26 | #include <linux/pci.h> |
6fb14755 | 27 | #include <linux/pfn.h> |
c9cf5528 | 28 | #include <linux/poison.h> |
17a941d8 | 29 | #include <linux/dma-mapping.h> |
a63fdc51 | 30 | #include <linux/memory.h> |
44df75e6 | 31 | #include <linux/memory_hotplug.h> |
4b94ffdc | 32 | #include <linux/memremap.h> |
ae32b129 | 33 | #include <linux/nmi.h> |
5a0e3ad6 | 34 | #include <linux/gfp.h> |
2f96b8c1 | 35 | #include <linux/kcore.h> |
426e5c42 | 36 | #include <linux/bootmem_info.h> |
1da177e4 LT |
37 | |
38 | #include <asm/processor.h> | |
46eaa670 | 39 | #include <asm/bios_ebda.h> |
7c0f6ba6 | 40 | #include <linux/uaccess.h> |
1da177e4 LT |
41 | #include <asm/pgalloc.h> |
42 | #include <asm/dma.h> | |
43 | #include <asm/fixmap.h> | |
66441bd3 | 44 | #include <asm/e820/api.h> |
1da177e4 LT |
45 | #include <asm/apic.h> |
46 | #include <asm/tlb.h> | |
47 | #include <asm/mmu_context.h> | |
48 | #include <asm/proto.h> | |
49 | #include <asm/smp.h> | |
2bc0414e | 50 | #include <asm/sections.h> |
718fc13b | 51 | #include <asm/kdebug.h> |
aaa64e04 | 52 | #include <asm/numa.h> |
d1163651 | 53 | #include <asm/set_memory.h> |
4fcb2083 | 54 | #include <asm/init.h> |
43c75f93 | 55 | #include <asm/uv/uv.h> |
e5f15b45 | 56 | #include <asm/setup.h> |
59566b0b | 57 | #include <asm/ftrace.h> |
1da177e4 | 58 | |
5c51bdbe YL |
59 | #include "mm_internal.h" |
60 | ||
cf4fb15b | 61 | #include "ident_map.c" |
aece2785 | 62 | |
eccd9064 BS |
63 | #define DEFINE_POPULATE(fname, type1, type2, init) \ |
64 | static inline void fname##_init(struct mm_struct *mm, \ | |
65 | type1##_t *arg1, type2##_t *arg2, bool init) \ | |
66 | { \ | |
67 | if (init) \ | |
68 | fname##_safe(mm, arg1, arg2); \ | |
69 | else \ | |
70 | fname(mm, arg1, arg2); \ | |
71 | } | |
72 | ||
73 | DEFINE_POPULATE(p4d_populate, p4d, pud, init) | |
74 | DEFINE_POPULATE(pgd_populate, pgd, p4d, init) | |
75 | DEFINE_POPULATE(pud_populate, pud, pmd, init) | |
76 | DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init) | |
77 | ||
78 | #define DEFINE_ENTRY(type1, type2, init) \ | |
79 | static inline void set_##type1##_init(type1##_t *arg1, \ | |
80 | type2##_t arg2, bool init) \ | |
81 | { \ | |
82 | if (init) \ | |
83 | set_##type1##_safe(arg1, arg2); \ | |
84 | else \ | |
85 | set_##type1(arg1, arg2); \ | |
86 | } | |
87 | ||
88 | DEFINE_ENTRY(p4d, p4d, init) | |
89 | DEFINE_ENTRY(pud, pud, init) | |
90 | DEFINE_ENTRY(pmd, pmd, init) | |
91 | DEFINE_ENTRY(pte, pte, init) | |
92 | ||
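
The DEFINE_POPULATE/DEFINE_ENTRY macros above generate thin wrappers that choose between the `*_safe()` variant (which warns if it would clobber a live entry) and the plain one, depending on the `init` flag. A minimal userspace sketch of the same token-pasting pattern; `set_entry`/`set_entry_safe` are illustrative stand-ins, not kernel APIs:

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the checked and unchecked kernel setters. */
static void set_entry(long *slot, long val)      { *slot = val; }
static void set_entry_safe(long *slot, long val) { assert(*slot == 0); *slot = val; }

/* Same shape as DEFINE_ENTRY: ## pastes the name, 'init' picks the variant. */
#define DEFINE_SETTER(name)                                     \
static void name##_init(long *slot, long val, bool init)       \
{                                                               \
        if (init)                                               \
                name##_safe(slot, val);                         \
        else                                                    \
                name(slot, val);                                \
}

DEFINE_SETTER(set_entry)

int main(void)
{
        long slot = 0;

        set_entry_init(&slot, 42, true);   /* first-time populate: checked   */
        set_entry_init(&slot, 43, false);  /* update path: overwrite allowed */
        printf("%ld\n", slot);             /* prints 43 */
        return 0;
}
```
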
334b2cea LT |
93 | static inline pgprot_t prot_sethuge(pgprot_t prot) |
94 | { | |
95 | WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PAT); | |
96 | ||
97 | return __pgprot(pgprot_val(prot) | _PAGE_PSE); | |
98 | } | |
eccd9064 | 99 | |
1da177e4 LT |
100 | /* |
101 | * NOTE: pagetable_init allocates all the fixmap pagetables contiguously | |
102 | * in physical space, so we can cache the location of the first one and | |
103 | * move around without checking the pgd every time. | |
104 | */ | |
105 | ||
8a57f484 | 106 | /* Bits supported by the hardware: */ |
f955371c | 107 | pteval_t __supported_pte_mask __read_mostly = ~0; |
8a57f484 DH |
108 | /* Bits allowed in normal kernel mappings: */ |
109 | pteval_t __default_kernel_pte_mask __read_mostly = ~0; | |
bd220a24 | 110 | EXPORT_SYMBOL_GPL(__supported_pte_mask); |
8a57f484 DH |
111 | /* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */ |
112 | EXPORT_SYMBOL(__default_kernel_pte_mask); | |
bd220a24 | 113 | |
bd220a24 YL |
114 | int force_personality32; |
115 | ||
deed05b7 IM |
116 | /* |
117 | * noexec32=on|off | |
118 | * Control the non-executable heap for 32-bit processes. | |
deed05b7 IM |
119 | * |
120 | * on PROT_READ does not imply PROT_EXEC for 32-bit processes (default) | |
121 | * off PROT_READ implies PROT_EXEC | |
122 | */ | |
bd220a24 YL |
123 | static int __init nonx32_setup(char *str) |
124 | { | |
125 | if (!strcmp(str, "on")) | |
126 | force_personality32 &= ~READ_IMPLIES_EXEC; | |
127 | else if (!strcmp(str, "off")) | |
128 | force_personality32 |= READ_IMPLIES_EXEC; | |
129 | return 1; | |
130 | } | |
131 | __setup("noexec32=", nonx32_setup); | |
132 | ||
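
READ_IMPLIES_EXEC, which nonx32_setup() toggles here, is an ordinary personality(2) flag, so its effect on mmap() can be observed from userspace. A small Linux-only demo; the exact behavior can vary by kernel version and architecture:

```c
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/personality.h>

int main(void)
{
        /* Turn on the legacy behavior that "noexec32=off" forces for
         * 32-bit tasks: PROT_READ implies PROT_EXEC for new mappings. */
        personality(personality(0xffffffff) | READ_IMPLIES_EXEC);

        void *p = mmap(NULL, 4096, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        /* The new mapping shows up as r-xp in /proc/self/maps even
         * though only PROT_READ was requested. */
        printf("mapped %p with PROT_READ only\n", p);
        return system("cat /proc/self/maps");
}
```
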
91f606a8 | 133 | static void sync_global_pgds_l5(unsigned long start, unsigned long end) |
141efad7 KS |
134 | { |
135 | unsigned long addr; | |
136 | ||
137 | for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) { | |
138 | const pgd_t *pgd_ref = pgd_offset_k(addr); | |
139 | struct page *page; | |
140 | ||
141 | /* Check for overflow */ | |
142 | if (addr < start) | |
143 | break; | |
144 | ||
145 | if (pgd_none(*pgd_ref)) | |
146 | continue; | |
147 | ||
148 | spin_lock(&pgd_lock); | |
149 | list_for_each_entry(page, &pgd_list, lru) { | |
150 | pgd_t *pgd; | |
151 | spinlock_t *pgt_lock; | |
152 | ||
153 | pgd = (pgd_t *)page_address(page) + pgd_index(addr); | |
154 | /* the pgt_lock only for Xen */ | |
155 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; | |
156 | spin_lock(pgt_lock); | |
157 | ||
158 | if (!pgd_none(*pgd_ref) && !pgd_none(*pgd)) | |
159 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); | |
160 | ||
161 | if (pgd_none(*pgd)) | |
162 | set_pgd(pgd, *pgd_ref); | |
163 | ||
164 | spin_unlock(pgt_lock); | |
165 | } | |
166 | spin_unlock(&pgd_lock); | |
167 | } | |
168 | } | |
91f606a8 KS |
169 | |
170 | static void sync_global_pgds_l4(unsigned long start, unsigned long end) | |
6afb5157 | 171 | { |
fc5f9d5f | 172 | unsigned long addr; |
44235dcd | 173 | |
fc5f9d5f BH |
174 | for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) { |
175 | pgd_t *pgd_ref = pgd_offset_k(addr); | |
f2a6a705 | 176 | const p4d_t *p4d_ref; |
44235dcd JF |
177 | struct page *page; |
178 | ||
f2a6a705 KS |
179 | /* |
180 | * With folded p4d, pgd_none() is always false, so we need to | |
d9f6e12f | 181 | * handle synchronization at the p4d level. |
f2a6a705 | 182 | */ |
c65e774f | 183 | MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref)); |
fc5f9d5f | 184 | p4d_ref = p4d_offset(pgd_ref, addr); |
f2a6a705 KS |
185 | |
186 | if (p4d_none(*p4d_ref)) | |
44235dcd JF |
187 | continue; |
188 | ||
a79e53d8 | 189 | spin_lock(&pgd_lock); |
44235dcd | 190 | list_for_each_entry(page, &pgd_list, lru) { |
be354f40 | 191 | pgd_t *pgd; |
f2a6a705 | 192 | p4d_t *p4d; |
617d34d9 JF |
193 | spinlock_t *pgt_lock; |
194 | ||
fc5f9d5f BH |
195 | pgd = (pgd_t *)page_address(page) + pgd_index(addr); |
196 | p4d = p4d_offset(pgd, addr); | |
a79e53d8 | 197 | /* the pgt_lock only for Xen */ |
617d34d9 JF |
198 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; |
199 | spin_lock(pgt_lock); | |
200 | ||
f2a6a705 | 201 | if (!p4d_none(*p4d_ref) && !p4d_none(*p4d)) |
dc4875f0 AK |
202 | BUG_ON(p4d_pgtable(*p4d) |
203 | != p4d_pgtable(*p4d_ref)); | |
617d34d9 | 204 | |
f2a6a705 KS |
205 | if (p4d_none(*p4d)) |
206 | set_p4d(p4d, *p4d_ref); | |
9661d5bc | 207 | |
617d34d9 | 208 | spin_unlock(pgt_lock); |
44235dcd | 209 | } |
a79e53d8 | 210 | spin_unlock(&pgd_lock); |
44235dcd | 211 | } |
6afb5157 | 212 | } |
91f606a8 KS |
213 | |
214 | /* | |
215 | * When memory is added, make sure all processes' MMs have | |
216 | * suitable PGD entries in the local PGD-level page. | |
217 | */ | |
2b32ab03 | 218 | static void sync_global_pgds(unsigned long start, unsigned long end) |
91f606a8 | 219 | { |
ed7588d5 | 220 | if (pgtable_l5_enabled()) |
91f606a8 KS |
221 | sync_global_pgds_l5(start, end); |
222 | else | |
223 | sync_global_pgds_l4(start, end); | |
224 | } | |
6afb5157 | 225 | |
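
Both sync variants walk the PGD slots covering [start, end] with `ALIGN(addr + 1, PGDIR_SIZE)` and stop if that increment wraps past the top of the address space, which is what the `addr < start` check catches. The stepping logic in isolation, with a toy stride standing in for PGDIR_SIZE:

```c
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        const unsigned long STRIDE = 0x1000;            /* toy PGDIR_SIZE */
        unsigned long start = 0xfffffffffffff800UL;     /* near the top of the space */
        unsigned long end   = 0xffffffffffffffffUL;

        for (unsigned long addr = start; addr <= end;
             addr = ALIGN(addr + 1, STRIDE)) {
                if (addr < start) {     /* the kernel loop's overflow guard */
                        puts("wrapped past the top, stopping");
                        break;
                }
                printf("visit PGD slot covering %#lx\n", addr);
        }
        return 0;
}
```
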
8d6ea967 MS |
226 | /* |
227 | * NOTE: This function is marked __ref because it calls the __init-time | |
228 | * allocator (memblock_alloc). Doing so is safe ONLY while after_bootmem == 0. | |
229 | */ | |
230 | static __ref void *spp_getpage(void) | |
14a62c34 | 231 | { |
1da177e4 | 232 | void *ptr; |
14a62c34 | 233 | |
1da177e4 | 234 | if (after_bootmem) |
75f296d9 | 235 | ptr = (void *) get_zeroed_page(GFP_ATOMIC); |
1da177e4 | 236 | else |
15c3c114 | 237 | ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE); |
14a62c34 TG |
238 | |
239 | if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) { | |
240 | panic("set_pte_phys: cannot allocate page data %s\n", | |
241 | after_bootmem ? "after bootmem" : ""); | |
242 | } | |
1da177e4 | 243 | |
10f22dde | 244 | pr_debug("spp_getpage %p\n", ptr); |
14a62c34 | 245 | |
1da177e4 | 246 | return ptr; |
14a62c34 | 247 | } |
1da177e4 | 248 | |
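
spp_getpage() must work both before and after the page allocator is up, hence the branch on after_bootmem. A userspace sketch of the same two-phase idea, with a static bump arena standing in for memblock and aligned_alloc() standing in for get_zeroed_page() (all names illustrative):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static int after_bootmem;       /* flipped once the real allocator is up */
static _Alignas(4096) char early_pool[8 * 4096];
static size_t early_off;

static void *get_page(void)
{
        void *ptr;

        if (after_bootmem) {
                ptr = aligned_alloc(PAGE_SIZE, PAGE_SIZE);      /* late path */
                if (ptr)
                        memset(ptr, 0, PAGE_SIZE);
        } else {
                ptr = early_off < sizeof(early_pool) ?
                        early_pool + early_off : NULL;          /* early bump path */
                early_off += PAGE_SIZE;
        }
        if (!ptr || ((unsigned long)ptr & (PAGE_SIZE - 1))) {
                fprintf(stderr, "cannot allocate page %s\n",
                        after_bootmem ? "after bootmem" : "");
                exit(1);
        }
        return ptr;
}

int main(void)
{
        void *a = get_page();           /* served from the early pool */

        after_bootmem = 1;
        void *b = get_page();           /* served by aligned_alloc()  */
        printf("early %p, late %p\n", a, b);
        free(b);
        return 0;
}
```
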
f2a6a705 | 249 | static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr) |
1da177e4 | 250 | { |
458a3e64 | 251 | if (pgd_none(*pgd)) { |
f2a6a705 KS |
252 | p4d_t *p4d = (p4d_t *)spp_getpage(); |
253 | pgd_populate(&init_mm, pgd, p4d); | |
254 | if (p4d != p4d_offset(pgd, 0)) | |
458a3e64 | 255 | printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n", |
f2a6a705 KS |
256 | p4d, p4d_offset(pgd, 0)); |
257 | } | |
258 | return p4d_offset(pgd, vaddr); | |
259 | } | |
260 | ||
261 | static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr) | |
262 | { | |
263 | if (p4d_none(*p4d)) { | |
264 | pud_t *pud = (pud_t *)spp_getpage(); | |
265 | p4d_populate(&init_mm, p4d, pud); | |
266 | if (pud != pud_offset(p4d, 0)) | |
267 | printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n", | |
268 | pud, pud_offset(p4d, 0)); | |
458a3e64 | 269 | } |
f2a6a705 | 270 | return pud_offset(p4d, vaddr); |
458a3e64 | 271 | } |
1da177e4 | 272 | |
f254f390 | 273 | static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr) |
458a3e64 | 274 | { |
1da177e4 | 275 | if (pud_none(*pud)) { |
458a3e64 | 276 | pmd_t *pmd = (pmd_t *) spp_getpage(); |
bb23e403 | 277 | pud_populate(&init_mm, pud, pmd); |
458a3e64 | 278 | if (pmd != pmd_offset(pud, 0)) |
f2a6a705 | 279 | printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n", |
458a3e64 | 280 | pmd, pmd_offset(pud, 0)); |
1da177e4 | 281 | } |
458a3e64 TH |
282 | return pmd_offset(pud, vaddr); |
283 | } | |
284 | ||
f254f390 | 285 | static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr) |
458a3e64 | 286 | { |
1da177e4 | 287 | if (pmd_none(*pmd)) { |
458a3e64 | 288 | pte_t *pte = (pte_t *) spp_getpage(); |
bb23e403 | 289 | pmd_populate_kernel(&init_mm, pmd, pte); |
458a3e64 | 290 | if (pte != pte_offset_kernel(pmd, 0)) |
f2a6a705 | 291 | printk(KERN_ERR "PAGETABLE BUG #03!\n"); |
1da177e4 | 292 | } |
458a3e64 TH |
293 | return pte_offset_kernel(pmd, vaddr); |
294 | } | |
295 | ||
f2a6a705 | 296 | static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte) |
458a3e64 | 297 | { |
f2a6a705 KS |
298 | pmd_t *pmd = fill_pmd(pud, vaddr); |
299 | pte_t *pte = fill_pte(pmd, vaddr); | |
1da177e4 | 300 | |
1da177e4 LT |
301 | set_pte(pte, new_pte); |
302 | ||
303 | /* | |
304 | * It's enough to flush this one mapping. | |
305 | * (PGE mappings get flushed as well) | |
306 | */ | |
58430c5d | 307 | flush_tlb_one_kernel(vaddr); |
1da177e4 LT |
308 | } |
309 | ||
f2a6a705 KS |
310 | void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte) |
311 | { | |
312 | p4d_t *p4d = p4d_page + p4d_index(vaddr); | |
313 | pud_t *pud = fill_pud(p4d, vaddr); | |
314 | ||
315 | __set_pte_vaddr(pud, vaddr, new_pte); | |
316 | } | |
317 | ||
318 | void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) | |
319 | { | |
320 | pud_t *pud = pud_page + pud_index(vaddr); | |
321 | ||
322 | __set_pte_vaddr(pud, vaddr, new_pte); | |
323 | } | |
324 | ||
458a3e64 | 325 | void set_pte_vaddr(unsigned long vaddr, pte_t pteval) |
0814e0ba EH |
326 | { |
327 | pgd_t *pgd; | |
f2a6a705 | 328 | p4d_t *p4d_page; |
0814e0ba EH |
329 | |
330 | pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval)); | |
331 | ||
332 | pgd = pgd_offset_k(vaddr); | |
333 | if (pgd_none(*pgd)) { | |
334 | printk(KERN_ERR | |
335 | "PGD FIXMAP MISSING, it should be setup in head.S!\n"); | |
336 | return; | |
337 | } | |
f2a6a705 KS |
338 | |
339 | p4d_page = p4d_offset(pgd, 0); | |
340 | set_pte_vaddr_p4d(p4d_page, vaddr, pteval); | |
0814e0ba EH |
341 | } |
342 | ||
458a3e64 | 343 | pmd_t * __init populate_extra_pmd(unsigned long vaddr) |
11124411 TH |
344 | { |
345 | pgd_t *pgd; | |
f2a6a705 | 346 | p4d_t *p4d; |
11124411 TH |
347 | pud_t *pud; |
348 | ||
349 | pgd = pgd_offset_k(vaddr); | |
f2a6a705 KS |
350 | p4d = fill_p4d(pgd, vaddr); |
351 | pud = fill_pud(p4d, vaddr); | |
458a3e64 TH |
352 | return fill_pmd(pud, vaddr); |
353 | } | |
354 | ||
355 | pte_t * __init populate_extra_pte(unsigned long vaddr) | |
356 | { | |
357 | pmd_t *pmd; | |
11124411 | 358 | |
458a3e64 TH |
359 | pmd = populate_extra_pmd(vaddr); |
360 | return fill_pte(pmd, vaddr); | |
11124411 TH |
361 | } |
362 | ||
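
The fill_p4d()/fill_pud()/fill_pmd()/fill_pte() helpers all share one shape: if the slot at this level is empty, allocate the next-level table, hook it in, and return a pointer into it. A compact sketch of one such step for a toy two-level table (types and names are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 8

typedef struct { long pte[ENTRIES]; } pt_t;     /* toy leaf table */
typedef struct { pt_t *dir[ENTRIES]; } pd_t;    /* toy directory  */

/* Same shape as fill_pmd(): populate the slot on demand, then descend. */
static long *fill_slot(pd_t *pd, unsigned long vaddr)
{
        unsigned long di = (vaddr / ENTRIES) % ENTRIES; /* directory index */
        unsigned long ti = vaddr % ENTRIES;             /* table index     */

        if (!pd->dir[di]) {                             /* "pmd_none()"    */
                pd->dir[di] = calloc(1, sizeof(pt_t));  /* "spp_getpage()" */
                if (!pd->dir[di])
                        abort();
        }
        return &pd->dir[di]->pte[ti];   /* "pte_offset_kernel()" */
}

int main(void)
{
        pd_t pd = {0};

        *fill_slot(&pd, 13) = 0x42;             /* allocates the leaf on first use */
        printf("%lx\n", *fill_slot(&pd, 13));   /* reuses it: prints 42 */
        return 0;
}
```
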
3a9e189d JS |
363 | /* |
364 | * Create large page table mappings for a range of physical addresses. | |
365 | */ | |
366 | static void __init __init_extra_mapping(unsigned long phys, unsigned long size, | |
2df58b6d | 367 | enum page_cache_mode cache) |
3a9e189d JS |
368 | { |
369 | pgd_t *pgd; | |
f2a6a705 | 370 | p4d_t *p4d; |
3a9e189d JS |
371 | pud_t *pud; |
372 | pmd_t *pmd; | |
2df58b6d | 373 | pgprot_t prot; |
3a9e189d | 374 | |
2df58b6d | 375 | pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) | |
d0735693 | 376 | protval_4k_2_large(cachemode2protval(cache)); |
3a9e189d JS |
377 | BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK)); |
378 | for (; size; phys += PMD_SIZE, size -= PMD_SIZE) { | |
379 | pgd = pgd_offset_k((unsigned long)__va(phys)); | |
380 | if (pgd_none(*pgd)) { | |
f2a6a705 KS |
381 | p4d = (p4d_t *) spp_getpage(); |
382 | set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE | | |
383 | _PAGE_USER)); | |
384 | } | |
385 | p4d = p4d_offset(pgd, (unsigned long)__va(phys)); | |
386 | if (p4d_none(*p4d)) { | |
3a9e189d | 387 | pud = (pud_t *) spp_getpage(); |
f2a6a705 | 388 | set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE | |
3a9e189d JS |
389 | _PAGE_USER)); |
390 | } | |
f2a6a705 | 391 | pud = pud_offset(p4d, (unsigned long)__va(phys)); |
3a9e189d JS |
392 | if (pud_none(*pud)) { |
393 | pmd = (pmd_t *) spp_getpage(); | |
394 | set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | | |
395 | _PAGE_USER)); | |
396 | } | |
397 | pmd = pmd_offset(pud, phys); | |
398 | BUG_ON(!pmd_none(*pmd)); | |
399 | set_pmd(pmd, __pmd(phys | pgprot_val(prot))); | |
400 | } | |
401 | } | |
402 | ||
403 | void __init init_extra_mapping_wb(unsigned long phys, unsigned long size) | |
404 | { | |
2df58b6d | 405 | __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB); |
3a9e189d JS |
406 | } |
407 | ||
408 | void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) | |
409 | { | |
2df58b6d | 410 | __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC); |
3a9e189d JS |
411 | } |
412 | ||
31eedd82 | 413 | /* |
88f3aec7 IM |
414 | * The head.S code sets up the kernel high mapping: |
415 | * | |
416 | * from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text) | |
31eedd82 | 417 | * |
1e3b3081 | 418 | * phys_base holds the negative offset to the kernel, which is added |
31eedd82 TG |
419 | * to the compile time generated pmds. This results in invalid pmds up |
420 | * to the point where we hit the physaddr 0 mapping. | |
421 | * | |
e5f15b45 YL |
422 | * We limit the mappings to the region from _text to _brk_end. _brk_end |
423 | * is rounded up to the 2MB boundary. This catches the invalid pmds as | |
31eedd82 TG |
424 | * well, as they are located before _text: |
425 | */ | |
426 | void __init cleanup_highmap(void) | |
427 | { | |
428 | unsigned long vaddr = __START_KERNEL_map; | |
10054230 | 429 | unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE; |
e5f15b45 | 430 | unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; |
31eedd82 | 431 | pmd_t *pmd = level2_kernel_pgt; |
31eedd82 | 432 | |
10054230 YL |
433 | /* |
434 | * Native path, max_pfn_mapped is not set yet. | |
435 | * Xen has valid max_pfn_mapped set in | |
436 | * arch/x86/xen/mmu.c:xen_setup_kernel_pagetable(). | |
437 | */ | |
438 | if (max_pfn_mapped) | |
439 | vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT); | |
440 | ||
e5f15b45 | 441 | for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { |
2884f110 | 442 | if (pmd_none(*pmd)) |
31eedd82 TG |
443 | continue; |
444 | if (vaddr < (unsigned long) _text || vaddr > end) | |
445 | set_pmd(pmd, __pmd(0)); | |
446 | } | |
447 | } | |
448 | ||
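
cleanup_highmap() keeps only PMDs whose virtual address lies in [_text, roundup(_brk_end, PMD_SIZE) - 1] and zaps the rest. The boundary arithmetic can be checked standalone with toy numbers (these are not real kernel addresses):

```c
#include <stdio.h>

#define PMD_SIZE 0x200000UL
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        unsigned long text    = 0x1000000UL;    /* pretend _text    */
        unsigned long brk_end = 0x1234567UL;    /* pretend _brk_end */
        unsigned long end = roundup(brk_end, PMD_SIZE) - 1;

        /* Entries below _text or above 'end' get zapped. */
        for (unsigned long vaddr = 0; vaddr + PMD_SIZE - 1 < 0x2000000UL;
             vaddr += PMD_SIZE)
                printf("%#9lx: %s\n", vaddr,
                       (vaddr < text || vaddr > end) ? "zap" : "keep");
        return 0;
}
```
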
59b3d020 TG |
449 | /* |
450 | * Create PTE level page table mapping for physical addresses. | |
451 | * It returns the last physical address mapped. | |
452 | */ | |
7b16eb89 | 453 | static unsigned long __meminit |
59b3d020 | 454 | phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end, |
eccd9064 | 455 | pgprot_t prot, bool init) |
4f9c11dd | 456 | { |
59b3d020 TG |
457 | unsigned long pages = 0, paddr_next; |
458 | unsigned long paddr_last = paddr_end; | |
459 | pte_t *pte; | |
4f9c11dd | 460 | int i; |
7b16eb89 | 461 | |
59b3d020 TG |
462 | pte = pte_page + pte_index(paddr); |
463 | i = pte_index(paddr); | |
4f9c11dd | 464 | |
59b3d020 TG |
465 | for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) { |
466 | paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE; | |
467 | if (paddr >= paddr_end) { | |
eceb3632 | 468 | if (!after_bootmem && |
3bce64f0 | 469 | !e820__mapped_any(paddr & PAGE_MASK, paddr_next, |
09821ff1 | 470 | E820_TYPE_RAM) && |
3bce64f0 | 471 | !e820__mapped_any(paddr & PAGE_MASK, paddr_next, |
09821ff1 | 472 | E820_TYPE_RESERVED_KERN)) |
eccd9064 | 473 | set_pte_init(pte, __pte(0), init); |
eceb3632 | 474 | continue; |
4f9c11dd JF |
475 | } |
476 | ||
b27a43c1 SS |
477 | /* |
478 | * We will reuse the existing mapping. | |
479 | * Xen, for example, has some special requirements, like mapping | |
480 | * pagetable pages as RO. So assume that whoever pre-set up | |
481 | * these mappings knew what they were doing. | |
482 | */ | |
dcb32d99 | 483 | if (!pte_none(*pte)) { |
876ee61a JB |
484 | if (!after_bootmem) |
485 | pages++; | |
4f9c11dd | 486 | continue; |
3afa3949 | 487 | } |
4f9c11dd JF |
488 | |
489 | if (0) | |
59b3d020 TG |
490 | pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr, |
491 | pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte); | |
4f9c11dd | 492 | pages++; |
eccd9064 | 493 | set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init); |
59b3d020 | 494 | paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE; |
4f9c11dd | 495 | } |
a2699e47 | 496 | |
4f9c11dd | 497 | update_page_count(PG_LEVEL_4K, pages); |
7b16eb89 | 498 | |
59b3d020 | 499 | return paddr_last; |
4f9c11dd JF |
500 | } |
501 | ||
59b3d020 TG |
502 | /* |
503 | * Create PMD level page table mapping for physical addresses. The virtual | |
504 | * and physical address have to be aligned at this level. | |
505 | * It returns the last physical address mapped. | |
506 | */ | |
cc615032 | 507 | static unsigned long __meminit |
59b3d020 | 508 | phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end, |
eccd9064 | 509 | unsigned long page_size_mask, pgprot_t prot, bool init) |
44df75e6 | 510 | { |
59b3d020 TG |
511 | unsigned long pages = 0, paddr_next; |
512 | unsigned long paddr_last = paddr_end; | |
ce0c0e50 | 513 | |
59b3d020 | 514 | int i = pmd_index(paddr); |
44df75e6 | 515 | |
59b3d020 TG |
516 | for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) { |
517 | pmd_t *pmd = pmd_page + pmd_index(paddr); | |
4f9c11dd | 518 | pte_t *pte; |
b27a43c1 | 519 | pgprot_t new_prot = prot; |
44df75e6 | 520 | |
59b3d020 TG |
521 | paddr_next = (paddr & PMD_MASK) + PMD_SIZE; |
522 | if (paddr >= paddr_end) { | |
eceb3632 | 523 | if (!after_bootmem && |
3bce64f0 | 524 | !e820__mapped_any(paddr & PMD_MASK, paddr_next, |
09821ff1 | 525 | E820_TYPE_RAM) && |
3bce64f0 | 526 | !e820__mapped_any(paddr & PMD_MASK, paddr_next, |
09821ff1 | 527 | E820_TYPE_RESERVED_KERN)) |
eccd9064 | 528 | set_pmd_init(pmd, __pmd(0), init); |
eceb3632 | 529 | continue; |
44df75e6 | 530 | } |
6ad91658 | 531 | |
dcb32d99 | 532 | if (!pmd_none(*pmd)) { |
8ae3a5a8 JB |
533 | if (!pmd_large(*pmd)) { |
534 | spin_lock(&init_mm.page_table_lock); | |
973dc4f3 | 535 | pte = (pte_t *)pmd_page_vaddr(*pmd); |
59b3d020 | 536 | paddr_last = phys_pte_init(pte, paddr, |
eccd9064 BS |
537 | paddr_end, prot, |
538 | init); | |
8ae3a5a8 | 539 | spin_unlock(&init_mm.page_table_lock); |
a2699e47 | 540 | continue; |
8ae3a5a8 | 541 | } |
b27a43c1 SS |
542 | /* |
543 | * If we are ok with PG_LEVEL_2M mapping, then we will | |
544 | * use the existing mapping. | |
545 | * | |
546 | * Otherwise, we will split the large page mapping but | |
547 | * use the same existing protection bits except for | |
548 | * large page, so that we don't violate Intel's TLB | |
549 | * Application note (317080) which says, while changing | |
550 | * the page sizes, new and old translations should | |
551 | * not differ with respect to page frame and | |
552 | * attributes. | |
553 | */ | |
3afa3949 | 554 | if (page_size_mask & (1 << PG_LEVEL_2M)) { |
876ee61a JB |
555 | if (!after_bootmem) |
556 | pages++; | |
59b3d020 | 557 | paddr_last = paddr_next; |
b27a43c1 | 558 | continue; |
3afa3949 | 559 | } |
b27a43c1 | 560 | new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); |
4f9c11dd JF |
561 | } |
562 | ||
b50efd2a | 563 | if (page_size_mask & (1<<PG_LEVEL_2M)) { |
4f9c11dd | 564 | pages++; |
8ae3a5a8 | 565 | spin_lock(&init_mm.page_table_lock); |
334b2cea LT |
566 | set_pmd_init(pmd, |
567 | pfn_pmd(paddr >> PAGE_SHIFT, prot_sethuge(prot)), | |
eccd9064 | 568 | init); |
8ae3a5a8 | 569 | spin_unlock(&init_mm.page_table_lock); |
59b3d020 | 570 | paddr_last = paddr_next; |
6ad91658 | 571 | continue; |
4f9c11dd | 572 | } |
6ad91658 | 573 | |
868bf4d6 | 574 | pte = alloc_low_page(); |
eccd9064 | 575 | paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init); |
4f9c11dd | 576 | |
8ae3a5a8 | 577 | spin_lock(&init_mm.page_table_lock); |
eccd9064 | 578 | pmd_populate_kernel_init(&init_mm, pmd, pte, init); |
8ae3a5a8 | 579 | spin_unlock(&init_mm.page_table_lock); |
44df75e6 | 580 | } |
ce0c0e50 | 581 | update_page_count(PG_LEVEL_2M, pages); |
59b3d020 | 582 | return paddr_last; |
44df75e6 MT |
583 | } |
584 | ||
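
Whether phys_pmd_init() installs a 2M leaf or recurses to 4K pages is decided by page_size_mask, a bitmask indexed by PG_LEVEL_* values. A minimal sketch of the test (the enum values below mirror the x86 ordering but are written out for illustration):

```c
#include <stdbool.h>
#include <stdio.h>

/* Mirrors x86's enum pg_level ordering; written out for illustration. */
enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

static bool can_use_large(unsigned long page_size_mask, enum pg_level lvl)
{
        return page_size_mask & (1UL << lvl);
}

int main(void)
{
        /* e.g. the CPU has PSE but the caller disallowed gbpages: */
        unsigned long mask = 1UL << PG_LEVEL_2M;

        printf("2M leaf allowed: %d\n", can_use_large(mask, PG_LEVEL_2M)); /* 1 */
        printf("1G leaf allowed: %d\n", can_use_large(mask, PG_LEVEL_1G)); /* 0 */
        return 0;
}
```
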
59b3d020 TG |
585 | /* |
586 | * Create PUD level page table mapping for physical addresses. The virtual | |
faa37933 TG |
587 | * and physical address do not have to be aligned at this level. KASLR can |
588 | * randomize virtual addresses up to this level. | |
59b3d020 TG |
589 | * It returns the last physical address mapped. |
590 | */ | |
cc615032 | 591 | static unsigned long __meminit |
59b3d020 | 592 | phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, |
c164fbb4 | 593 | unsigned long page_size_mask, pgprot_t _prot, bool init) |
14a62c34 | 594 | { |
59b3d020 TG |
595 | unsigned long pages = 0, paddr_next; |
596 | unsigned long paddr_last = paddr_end; | |
faa37933 TG |
597 | unsigned long vaddr = (unsigned long)__va(paddr); |
598 | int i = pud_index(vaddr); | |
44df75e6 | 599 | |
59b3d020 | 600 | for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) { |
faa37933 | 601 | pud_t *pud; |
1da177e4 | 602 | pmd_t *pmd; |
c164fbb4 | 603 | pgprot_t prot = _prot; |
1da177e4 | 604 | |
faa37933 TG |
605 | vaddr = (unsigned long)__va(paddr); |
606 | pud = pud_page + pud_index(vaddr); | |
59b3d020 | 607 | paddr_next = (paddr & PUD_MASK) + PUD_SIZE; |
faa37933 | 608 | |
59b3d020 | 609 | if (paddr >= paddr_end) { |
eceb3632 | 610 | if (!after_bootmem && |
3bce64f0 | 611 | !e820__mapped_any(paddr & PUD_MASK, paddr_next, |
09821ff1 | 612 | E820_TYPE_RAM) && |
3bce64f0 | 613 | !e820__mapped_any(paddr & PUD_MASK, paddr_next, |
09821ff1 | 614 | E820_TYPE_RESERVED_KERN)) |
eccd9064 | 615 | set_pud_init(pud, __pud(0), init); |
1da177e4 | 616 | continue; |
14a62c34 | 617 | } |
1da177e4 | 618 | |
dcb32d99 | 619 | if (!pud_none(*pud)) { |
a2699e47 | 620 | if (!pud_large(*pud)) { |
973dc4f3 | 621 | pmd = pmd_offset(pud, 0); |
59b3d020 TG |
622 | paddr_last = phys_pmd_init(pmd, paddr, |
623 | paddr_end, | |
624 | page_size_mask, | |
eccd9064 | 625 | prot, init); |
a2699e47 SS |
626 | continue; |
627 | } | |
b27a43c1 SS |
628 | /* |
629 | * If we are ok with PG_LEVEL_1G mapping, then we will | |
630 | * use the existing mapping. | |
631 | * | |
632 | * Otherwise, we will split the gbpage mapping but use | |
633 | * the same existing protection bits except for large | |
634 | * page, so that we don't violate Intel's TLB | |
635 | * Application note (317080) which says, while changing | |
636 | * the page sizes, new and old translations should | |
637 | * not differ with respect to page frame and | |
638 | * attributes. | |
639 | */ | |
3afa3949 | 640 | if (page_size_mask & (1 << PG_LEVEL_1G)) { |
876ee61a JB |
641 | if (!after_bootmem) |
642 | pages++; | |
59b3d020 | 643 | paddr_last = paddr_next; |
b27a43c1 | 644 | continue; |
3afa3949 | 645 | } |
b27a43c1 | 646 | prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud)); |
ef925766 AK |
647 | } |
648 | ||
b50efd2a | 649 | if (page_size_mask & (1<<PG_LEVEL_1G)) { |
ce0c0e50 | 650 | pages++; |
8ae3a5a8 | 651 | spin_lock(&init_mm.page_table_lock); |
334b2cea LT |
652 | set_pud_init(pud, |
653 | pfn_pud(paddr >> PAGE_SHIFT, prot_sethuge(prot)), | |
eccd9064 | 654 | init); |
8ae3a5a8 | 655 | spin_unlock(&init_mm.page_table_lock); |
59b3d020 | 656 | paddr_last = paddr_next; |
6ad91658 KM |
657 | continue; |
658 | } | |
659 | ||
868bf4d6 | 660 | pmd = alloc_low_page(); |
59b3d020 | 661 | paddr_last = phys_pmd_init(pmd, paddr, paddr_end, |
eccd9064 | 662 | page_size_mask, prot, init); |
8ae3a5a8 JB |
663 | |
664 | spin_lock(&init_mm.page_table_lock); | |
eccd9064 | 665 | pud_populate_init(&init_mm, pud, pmd, init); |
44df75e6 | 666 | spin_unlock(&init_mm.page_table_lock); |
1da177e4 | 667 | } |
a2699e47 | 668 | |
ce0c0e50 | 669 | update_page_count(PG_LEVEL_1G, pages); |
cc615032 | 670 | |
59b3d020 | 671 | return paddr_last; |
14a62c34 | 672 | } |
1da177e4 | 673 | |
7e82ea94 KS |
674 | static unsigned long __meminit |
675 | phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, | |
c164fbb4 | 676 | unsigned long page_size_mask, pgprot_t prot, bool init) |
7e82ea94 | 677 | { |
432c8332 KS |
678 | unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last; |
679 | ||
680 | paddr_last = paddr_end; | |
681 | vaddr = (unsigned long)__va(paddr); | |
682 | vaddr_end = (unsigned long)__va(paddr_end); | |
7e82ea94 | 683 | |
ed7588d5 | 684 | if (!pgtable_l5_enabled()) |
eccd9064 | 685 | return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, |
c164fbb4 | 686 | page_size_mask, prot, init); |
7e82ea94 | 687 | |
432c8332 KS |
688 | for (; vaddr < vaddr_end; vaddr = vaddr_next) { |
689 | p4d_t *p4d = p4d_page + p4d_index(vaddr); | |
7e82ea94 KS |
690 | pud_t *pud; |
691 | ||
432c8332 KS |
692 | vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE; |
693 | paddr = __pa(vaddr); | |
7e82ea94 KS |
694 | |
695 | if (paddr >= paddr_end) { | |
432c8332 | 696 | paddr_next = __pa(vaddr_next); |
7e82ea94 KS |
697 | if (!after_bootmem && |
698 | !e820__mapped_any(paddr & P4D_MASK, paddr_next, | |
699 | E820_TYPE_RAM) && | |
700 | !e820__mapped_any(paddr & P4D_MASK, paddr_next, | |
701 | E820_TYPE_RESERVED_KERN)) | |
eccd9064 | 702 | set_p4d_init(p4d, __p4d(0), init); |
7e82ea94 KS |
703 | continue; |
704 | } | |
705 | ||
706 | if (!p4d_none(*p4d)) { | |
707 | pud = pud_offset(p4d, 0); | |
432c8332 | 708 | paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end), |
c164fbb4 | 709 | page_size_mask, prot, init); |
7e82ea94 KS |
710 | continue; |
711 | } | |
712 | ||
713 | pud = alloc_low_page(); | |
432c8332 | 714 | paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end), |
c164fbb4 | 715 | page_size_mask, prot, init); |
7e82ea94 KS |
716 | |
717 | spin_lock(&init_mm.page_table_lock); | |
eccd9064 | 718 | p4d_populate_init(&init_mm, p4d, pud, init); |
7e82ea94 KS |
719 | spin_unlock(&init_mm.page_table_lock); |
720 | } | |
7e82ea94 KS |
721 | |
722 | return paddr_last; | |
723 | } | |
724 | ||
eccd9064 BS |
725 | static unsigned long __meminit |
726 | __kernel_physical_mapping_init(unsigned long paddr_start, | |
727 | unsigned long paddr_end, | |
728 | unsigned long page_size_mask, | |
c164fbb4 | 729 | pgprot_t prot, bool init) |
14a62c34 | 730 | { |
9b861528 | 731 | bool pgd_changed = false; |
59b3d020 | 732 | unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last; |
1da177e4 | 733 | |
59b3d020 TG |
734 | paddr_last = paddr_end; |
735 | vaddr = (unsigned long)__va(paddr_start); | |
736 | vaddr_end = (unsigned long)__va(paddr_end); | |
737 | vaddr_start = vaddr; | |
1da177e4 | 738 | |
59b3d020 TG |
739 | for (; vaddr < vaddr_end; vaddr = vaddr_next) { |
740 | pgd_t *pgd = pgd_offset_k(vaddr); | |
f2a6a705 | 741 | p4d_t *p4d; |
44df75e6 | 742 | |
59b3d020 | 743 | vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE; |
4f9c11dd | 744 | |
7e82ea94 KS |
745 | if (pgd_val(*pgd)) { |
746 | p4d = (p4d_t *)pgd_page_vaddr(*pgd); | |
747 | paddr_last = phys_p4d_init(p4d, __pa(vaddr), | |
59b3d020 | 748 | __pa(vaddr_end), |
eccd9064 | 749 | page_size_mask, |
c164fbb4 | 750 | prot, init); |
4f9c11dd JF |
751 | continue; |
752 | } | |
753 | ||
7e82ea94 KS |
754 | p4d = alloc_low_page(); |
755 | paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end), | |
c164fbb4 | 756 | page_size_mask, prot, init); |
8ae3a5a8 JB |
757 | |
758 | spin_lock(&init_mm.page_table_lock); | |
ed7588d5 | 759 | if (pgtable_l5_enabled()) |
eccd9064 | 760 | pgd_populate_init(&init_mm, pgd, p4d, init); |
7e82ea94 | 761 | else |
eccd9064 BS |
762 | p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr), |
763 | (pud_t *) p4d, init); | |
764 | ||
8ae3a5a8 | 765 | spin_unlock(&init_mm.page_table_lock); |
9b861528 | 766 | pgd_changed = true; |
14a62c34 | 767 | } |
9b861528 HL |
768 | |
769 | if (pgd_changed) | |
5372e155 | 770 | sync_global_pgds(vaddr_start, vaddr_end - 1); |
9b861528 | 771 | |
59b3d020 | 772 | return paddr_last; |
b50efd2a | 773 | } |
7b16eb89 | 774 | |
eccd9064 BS |
775 | |
776 | /* | |
777 | * Create the page table mapping for physical memory at specific physical | |
778 | * addresses. Note that it can only be used to populate non-present entries. | |
779 | * The virtual and physical addresses have to be aligned down to PMD | |
780 | * level. It returns the last physical address mapped. | |
781 | */ | |
782 | unsigned long __meminit | |
783 | kernel_physical_mapping_init(unsigned long paddr_start, | |
784 | unsigned long paddr_end, | |
c164fbb4 | 785 | unsigned long page_size_mask, pgprot_t prot) |
eccd9064 BS |
786 | { |
787 | return __kernel_physical_mapping_init(paddr_start, paddr_end, | |
c164fbb4 | 788 | page_size_mask, prot, true); |
eccd9064 BS |
789 | } |
790 | ||
791 | /* | |
792 | * This function is similar to kernel_physical_mapping_init() above with the | |
793 | * exception that it uses the plain set_{p4d,pud,pmd,pte}() helpers instead | |
794 | * of the set_*_safe() variants when updating the mapping. The caller is | |
795 | * responsible for flushing the TLBs after the function returns. | |
796 | */ | |
797 | unsigned long __meminit | |
798 | kernel_physical_mapping_change(unsigned long paddr_start, | |
799 | unsigned long paddr_end, | |
800 | unsigned long page_size_mask) | |
801 | { | |
802 | return __kernel_physical_mapping_init(paddr_start, paddr_end, | |
c164fbb4 LG |
803 | page_size_mask, PAGE_KERNEL, |
804 | false); | |
eccd9064 BS |
805 | } |
806 | ||
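
The init flag threaded through __kernel_physical_mapping_init() is the whole difference between the two entry points: kernel_physical_mapping_init() passes true and gets the checked *_safe() setters, while kernel_physical_mapping_change() passes false and silently overwrites, leaving the TLB flush to its caller. A toy model of that contract (all names illustrative):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static void set_slot(long *slot, long val) { *slot = val; }

static void set_slot_safe(long *slot, long val)
{
        assert(*slot == 0 && "would clobber a live entry during init");
        *slot = val;
}

/* The 'init' flag mirrors the one threaded through the functions above. */
static void map_one(long *slot, long val, bool init)
{
        if (init)
                set_slot_safe(slot, val);       /* kernel_physical_mapping_init()   */
        else
                set_slot(slot, val);            /* kernel_physical_mapping_change() */
}

int main(void)
{
        long entry = 0;

        map_one(&entry, 0x1000, true);  /* fresh mapping: checked            */
        map_one(&entry, 0x2000, false); /* deliberate change: overwrite      */
        printf("%#lx\n", entry);        /* a real caller flushes the TLB now */
        return 0;
}
```
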
2b97690f | 807 | #ifndef CONFIG_NUMA |
d8fc3afc | 808 | void __init initmem_init(void) |
1f75d7e3 | 809 | { |
d7dc899a | 810 | memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); |
1f75d7e3 | 811 | } |
3551f88f | 812 | #endif |
1f75d7e3 | 813 | |
1da177e4 LT |
814 | void __init paging_init(void) |
815 | { | |
44df75e6 | 816 | sparse_init(); |
44b57280 YL |
817 | |
818 | /* | |
819 | * Clear the default setting for node 0. | |
820 | * Note: don't use nodes_clear() here; that really does clear when | |
821 | * NUMA support is not compiled in, and a later node_set_state() | |
822 | * will not set it back. | |
823 | */ | |
4b0ef1fe | 824 | node_clear_state(0, N_MEMORY); |
aa61ee7b | 825 | node_clear_state(0, N_NORMAL_MEMORY); |
44b57280 | 826 | |
4c0b2e5f | 827 | zone_sizes_init(); |
1da177e4 | 828 | } |
1da177e4 | 829 | |
8d400913 OS |
830 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
831 | #define PAGE_UNUSED 0xFD | |
832 | ||
faf1c000 OS |
833 | /* |
834 | * The unused vmemmap range, which has not yet been memset(PAGE_UNUSED), | |
835 | * extends from unused_pmd_start to the next PMD_SIZE boundary. | |
836 | */ | |
837 | static unsigned long unused_pmd_start __meminitdata; | |
838 | ||
839 | static void __meminit vmemmap_flush_unused_pmd(void) | |
840 | { | |
841 | if (!unused_pmd_start) | |
842 | return; | |
843 | /* | |
844 | * Clears (unused_pmd_start, PMD_END] | |
845 | */ | |
846 | memset((void *)unused_pmd_start, PAGE_UNUSED, | |
847 | ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start); | |
848 | unused_pmd_start = 0; | |
849 | } | |
850 | ||
851 | #ifdef CONFIG_MEMORY_HOTPLUG | |
8d400913 OS |
852 | /* Returns true if the PMD is completely unused and thus it can be freed */ |
853 | static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end) | |
854 | { | |
855 | unsigned long start = ALIGN_DOWN(addr, PMD_SIZE); | |
856 | ||
faf1c000 OS |
857 | /* |
858 | * Flush the unused range cache to ensure that memchr_inv() will work | |
859 | * for the whole range. | |
860 | */ | |
861 | vmemmap_flush_unused_pmd(); | |
8d400913 OS |
862 | memset((void *)addr, PAGE_UNUSED, end - addr); |
863 | ||
864 | return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE); | |
865 | } | |
faf1c000 | 866 | #endif |
8d400913 | 867 | |
faf1c000 | 868 | static void __meminit __vmemmap_use_sub_pmd(unsigned long start) |
8d400913 OS |
869 | { |
870 | /* | |
871 | * As we expect to add at the same granularity as we remove, it is | |
872 | * sufficient to mark only some piece as used, to block the memmap page | |
873 | * from being removed when some other adjacent memmap is removed (just in | |
874 | * case the first memmap never gets initialized, e.g., because the memory | |
875 | * block never gets onlined). | |
876 | */ | |
877 | memset((void *)start, 0, sizeof(struct page)); | |
878 | } | |
879 | ||
faf1c000 OS |
880 | static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end) |
881 | { | |
882 | /* | |
883 | * We only optimize if the new used range directly follows the | |
884 | * previously unused range (esp., when populating consecutive sections). | |
885 | */ | |
886 | if (unused_pmd_start == start) { | |
887 | if (likely(IS_ALIGNED(end, PMD_SIZE))) | |
888 | unused_pmd_start = 0; | |
889 | else | |
890 | unused_pmd_start = end; | |
891 | return; | |
892 | } | |
893 | ||
894 | /* | |
895 | * If the range does not contiguously follow the previous one, make sure | |
896 | * to mark the unused range of the previous one so it can be removed. | |
897 | */ | |
898 | vmemmap_flush_unused_pmd(); | |
899 | __vmemmap_use_sub_pmd(start); | |
900 | } | |
901 | ||
902 | ||
8d400913 OS |
903 | static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end) |
904 | { | |
280abe14 AKR |
905 | const unsigned long page = ALIGN_DOWN(start, PMD_SIZE); |
906 | ||
faf1c000 OS |
907 | vmemmap_flush_unused_pmd(); |
908 | ||
8d400913 OS |
909 | /* |
910 | * Could be our memmap page is filled with PAGE_UNUSED already from a | |
911 | * previous remove. Make sure to reset it. | |
912 | */ | |
faf1c000 | 913 | __vmemmap_use_sub_pmd(start); |
8d400913 OS |
914 | |
915 | /* | |
916 | * Mark with PAGE_UNUSED the unused parts of the new memmap range | |
917 | */ | |
918 | if (!IS_ALIGNED(start, PMD_SIZE)) | |
280abe14 | 919 | memset((void *)page, PAGE_UNUSED, start - page); |
faf1c000 OS |
920 | |
921 | /* | |
922 | * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of | |
923 | * consecutive sections. For the last added PMD, remember where the | |
924 | * unused range begins. | |
925 | */ | |
8d400913 | 926 | if (!IS_ALIGNED(end, PMD_SIZE)) |
faf1c000 | 927 | unused_pmd_start = end; |
8d400913 OS |
928 | } |
929 | #endif | |
930 | ||
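
The PAGE_UNUSED scheme lets a vmemmap PMD be reclaimed once every byte in it carries the 0xFD poison, which is exactly what memchr_inv() verifies. A self-contained userspace model; glibc has no memchr_inv(), so an equivalent is written inline, and PMD_BYTES is a toy size:

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_UNUSED 0xFD
#define PMD_BYTES   64          /* toy stand-in for PMD_SIZE */

/* Minimal memchr_inv(): first byte that differs from 'c', or NULL. */
static const void *memchr_inv(const void *s, int c, size_t n)
{
        const unsigned char *p = s;

        for (size_t i = 0; i < n; i++)
                if (p[i] != (unsigned char)c)
                        return p + i;
        return NULL;
}

static bool pmd_is_unused(unsigned char *pmd)
{
        return !memchr_inv(pmd, PAGE_UNUSED, PMD_BYTES);
}

int main(void)
{
        unsigned char pmd[PMD_BYTES];

        memset(pmd, PAGE_UNUSED, sizeof(pmd));  /* whole range poisoned  */
        printf("%d\n", pmd_is_unused(pmd));     /* 1: safe to free       */

        memset(pmd + 16, 0, 8);                 /* part is a live memmap */
        printf("%d\n", pmd_is_unused(pmd));     /* 0: must stay          */
        return 0;
}
```
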
44df75e6 MT |
931 | /* |
932 | * Memory hotplug specific functions | |
44df75e6 | 933 | */ |
bc02af93 | 934 | #ifdef CONFIG_MEMORY_HOTPLUG |
ea085417 SZ |
935 | /* |
936 | * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need | |
937 | * updating. | |
938 | */ | |
3072e413 | 939 | static void update_end_of_memory_vars(u64 start, u64 size) |
ea085417 SZ |
940 | { |
941 | unsigned long end_pfn = PFN_UP(start + size); | |
942 | ||
943 | if (end_pfn > max_pfn) { | |
944 | max_pfn = end_pfn; | |
945 | max_low_pfn = end_pfn; | |
946 | high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; | |
947 | } | |
948 | } | |
949 | ||
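
update_end_of_memory_vars() rounds with PFN_UP() so a hot-added range whose size is not page-aligned still accounts for its final partial page. The rounding in isolation:

```c
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
        unsigned long start = 0x100000000UL;    /* hotplugged at 4 GiB    */
        unsigned long size  = 0x8000123UL;      /* deliberately unaligned */

        /* The trailing 0x123 spills into one extra page, so round up. */
        printf("end_pfn = %#lx\n", PFN_UP(start + size));
        return 0;
}
```
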
24e6d5a5 | 950 | int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, |
f5637d3b | 951 | struct mhp_params *params) |
44df75e6 | 952 | { |
44df75e6 MT |
953 | int ret; |
954 | ||
f5637d3b | 955 | ret = __add_pages(nid, start_pfn, nr_pages, params); |
fe8b868e | 956 | WARN_ON_ONCE(ret); |
44df75e6 | 957 | |
ea085417 | 958 | /* update max_pfn, max_low_pfn and high_memory */ |
3072e413 MH |
959 | update_end_of_memory_vars(start_pfn << PAGE_SHIFT, |
960 | nr_pages << PAGE_SHIFT); | |
ea085417 | 961 | |
44df75e6 | 962 | return ret; |
44df75e6 | 963 | } |
3072e413 | 964 | |
940519f0 | 965 | int arch_add_memory(int nid, u64 start, u64 size, |
f5637d3b | 966 | struct mhp_params *params) |
3072e413 MH |
967 | { |
968 | unsigned long start_pfn = start >> PAGE_SHIFT; | |
969 | unsigned long nr_pages = size >> PAGE_SHIFT; | |
970 | ||
bfeb022f | 971 | init_memory_mapping(start, start + size, params->pgprot); |
3072e413 | 972 | |
f5637d3b | 973 | return add_pages(nid, start_pfn, nr_pages, params); |
3072e413 | 974 | } |
44df75e6 | 975 | |
a7e6c701 | 976 | static void __meminit free_pagetable(struct page *page, int order) |
ae9aae9e | 977 | { |
ae9aae9e WC |
978 | unsigned long magic; |
979 | unsigned int nr_pages = 1 << order; | |
4b94ffdc | 980 | |
ae9aae9e WC |
981 | /* bootmem page has reserved flag */ |
982 | if (PageReserved(page)) { | |
983 | __ClearPageReserved(page); | |
ae9aae9e | 984 | |
c5e97ed1 | 985 | magic = page->index; |
ae9aae9e WC |
986 | if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) { |
987 | while (nr_pages--) | |
988 | put_page_bootmem(page++); | |
989 | } else | |
170a5a7e JL |
990 | while (nr_pages--) |
991 | free_reserved_page(page++); | |
ae9aae9e WC |
992 | } else |
993 | free_pages((unsigned long)page_address(page), order); | |
ae9aae9e WC |
994 | } |
995 | ||
a7e6c701 | 996 | static void __meminit free_hugepage_table(struct page *page, |
24b6d416 | 997 | struct vmem_altmap *altmap) |
a7e6c701 DW |
998 | { |
999 | if (altmap) | |
1000 | vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE); | |
1001 | else | |
1002 | free_pagetable(page, get_order(PMD_SIZE)); | |
1003 | } | |
1004 | ||
1005 | static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) | |
ae9aae9e WC |
1006 | { |
1007 | pte_t *pte; | |
1008 | int i; | |
1009 | ||
1010 | for (i = 0; i < PTRS_PER_PTE; i++) { | |
1011 | pte = pte_start + i; | |
dcb32d99 | 1012 | if (!pte_none(*pte)) |
ae9aae9e WC |
1013 | return; |
1014 | } | |
1015 | ||
1016 | /* free a pte table */ | |
a7e6c701 | 1017 | free_pagetable(pmd_page(*pmd), 0); |
ae9aae9e WC |
1018 | spin_lock(&init_mm.page_table_lock); |
1019 | pmd_clear(pmd); | |
1020 | spin_unlock(&init_mm.page_table_lock); | |
1021 | } | |
1022 | ||
a7e6c701 | 1023 | static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud) |
ae9aae9e WC |
1024 | { |
1025 | pmd_t *pmd; | |
1026 | int i; | |
1027 | ||
1028 | for (i = 0; i < PTRS_PER_PMD; i++) { | |
1029 | pmd = pmd_start + i; | |
dcb32d99 | 1030 | if (!pmd_none(*pmd)) |
ae9aae9e WC |
1031 | return; |
1032 | } | |
1033 | ||
1034 | /* free a pmd table */ | |
a7e6c701 | 1035 | free_pagetable(pud_page(*pud), 0); |
ae9aae9e WC |
1036 | spin_lock(&init_mm.page_table_lock); |
1037 | pud_clear(pud); | |
1038 | spin_unlock(&init_mm.page_table_lock); | |
1039 | } | |
1040 | ||
a7e6c701 | 1041 | static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d) |
f2a6a705 KS |
1042 | { |
1043 | pud_t *pud; | |
1044 | int i; | |
1045 | ||
1046 | for (i = 0; i < PTRS_PER_PUD; i++) { | |
1047 | pud = pud_start + i; | |
1048 | if (!pud_none(*pud)) | |
1049 | return; | |
1050 | } | |
1051 | ||
1052 | /* free a pud table */ | |
a7e6c701 | 1053 | free_pagetable(p4d_page(*p4d), 0); |
f2a6a705 KS |
1054 | spin_lock(&init_mm.page_table_lock); |
1055 | p4d_clear(p4d); | |
1056 | spin_unlock(&init_mm.page_table_lock); | |
1057 | } | |
1058 | ||
ae9aae9e WC |
1059 | static void __meminit |
1060 | remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end, | |
a7e6c701 | 1061 | bool direct) |
ae9aae9e WC |
1062 | { |
1063 | unsigned long next, pages = 0; | |
1064 | pte_t *pte; | |
ae9aae9e WC |
1065 | phys_addr_t phys_addr; |
1066 | ||
1067 | pte = pte_start + pte_index(addr); | |
1068 | for (; addr < end; addr = next, pte++) { | |
1069 | next = (addr + PAGE_SIZE) & PAGE_MASK; | |
1070 | if (next > end) | |
1071 | next = end; | |
1072 | ||
1073 | if (!pte_present(*pte)) | |
1074 | continue; | |
1075 | ||
1076 | /* | |
1077 | * We mapped [0,1G) memory as identity mapping when | |
1078 | * initializing, in arch/x86/kernel/head_64.S. These | |
1079 | * pagetables cannot be removed. | |
1080 | */ | |
1081 | phys_addr = pte_val(*pte) + (addr & PAGE_MASK); | |
1082 | if (phys_addr < (phys_addr_t)0x40000000) | |
1083 | return; | |
1084 | ||
8e2df191 OS |
1085 | if (!direct) |
1086 | free_pagetable(pte_page(*pte), 0); | |
ae9aae9e | 1087 | |
8e2df191 OS |
1088 | spin_lock(&init_mm.page_table_lock); |
1089 | pte_clear(&init_mm, addr, pte); | |
1090 | spin_unlock(&init_mm.page_table_lock); | |
ae9aae9e | 1091 | |
8e2df191 OS |
1092 | /* For non-direct mappings, the page count means nothing. */ |
1093 | pages++; | |
ae9aae9e WC |
1094 | } |
1095 | ||
1096 | /* Call free_pte_table() in remove_pmd_table(). */ | |
1097 | flush_tlb_all(); | |
1098 | if (direct) | |
1099 | update_page_count(PG_LEVEL_4K, -pages); | |
1100 | } | |
1101 | ||
1102 | static void __meminit | |
1103 | remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end, | |
24b6d416 | 1104 | bool direct, struct vmem_altmap *altmap) |
ae9aae9e WC |
1105 | { |
1106 | unsigned long next, pages = 0; | |
1107 | pte_t *pte_base; | |
1108 | pmd_t *pmd; | |
ae9aae9e WC |
1109 | |
1110 | pmd = pmd_start + pmd_index(addr); | |
1111 | for (; addr < end; addr = next, pmd++) { | |
1112 | next = pmd_addr_end(addr, end); | |
1113 | ||
1114 | if (!pmd_present(*pmd)) | |
1115 | continue; | |
1116 | ||
1117 | if (pmd_large(*pmd)) { | |
1118 | if (IS_ALIGNED(addr, PMD_SIZE) && | |
1119 | IS_ALIGNED(next, PMD_SIZE)) { | |
1120 | if (!direct) | |
a7e6c701 DW |
1121 | free_hugepage_table(pmd_page(*pmd), |
1122 | altmap); | |
ae9aae9e WC |
1123 | |
1124 | spin_lock(&init_mm.page_table_lock); | |
1125 | pmd_clear(pmd); | |
1126 | spin_unlock(&init_mm.page_table_lock); | |
1127 | pages++; | |
8d400913 OS |
1128 | } |
1129 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | |
1130 | else if (vmemmap_pmd_is_unused(addr, next)) { | |
a7e6c701 DW |
1131 | free_hugepage_table(pmd_page(*pmd), |
1132 | altmap); | |
ae9aae9e WC |
1133 | spin_lock(&init_mm.page_table_lock); |
1134 | pmd_clear(pmd); | |
1135 | spin_unlock(&init_mm.page_table_lock); | |
ae9aae9e | 1136 | } |
8d400913 | 1137 | #endif |
ae9aae9e WC |
1138 | continue; |
1139 | } | |
1140 | ||
1141 | pte_base = (pte_t *)pmd_page_vaddr(*pmd); | |
a7e6c701 DW |
1142 | remove_pte_table(pte_base, addr, next, direct); |
1143 | free_pte_table(pte_base, pmd); | |
ae9aae9e WC |
1144 | } |
1145 | ||
1146 | /* Call free_pmd_table() in remove_pud_table(). */ | |
1147 | if (direct) | |
1148 | update_page_count(PG_LEVEL_2M, -pages); | |
1149 | } | |
1150 | ||
1151 | static void __meminit | |
1152 | remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end, | |
24b6d416 | 1153 | struct vmem_altmap *altmap, bool direct) |
ae9aae9e WC |
1154 | { |
1155 | unsigned long next, pages = 0; | |
1156 | pmd_t *pmd_base; | |
1157 | pud_t *pud; | |
ae9aae9e WC |
1158 | |
1159 | pud = pud_start + pud_index(addr); | |
1160 | for (; addr < end; addr = next, pud++) { | |
1161 | next = pud_addr_end(addr, end); | |
1162 | ||
1163 | if (!pud_present(*pud)) | |
1164 | continue; | |
1165 | ||
69ccfe74 OS |
1166 | if (pud_large(*pud) && |
1167 | IS_ALIGNED(addr, PUD_SIZE) && | |
1168 | IS_ALIGNED(next, PUD_SIZE)) { | |
1169 | spin_lock(&init_mm.page_table_lock); | |
1170 | pud_clear(pud); | |
1171 | spin_unlock(&init_mm.page_table_lock); | |
1172 | pages++; | |
ae9aae9e WC |
1173 | continue; |
1174 | } | |
1175 | ||
e6ab9c4d | 1176 | pmd_base = pmd_offset(pud, 0); |
24b6d416 | 1177 | remove_pmd_table(pmd_base, addr, next, direct, altmap); |
a7e6c701 | 1178 | free_pmd_table(pmd_base, pud); |
ae9aae9e WC |
1179 | } |
1180 | ||
1181 | if (direct) | |
1182 | update_page_count(PG_LEVEL_1G, -pages); | |
1183 | } | |
1184 | ||
f2a6a705 KS |
1185 | static void __meminit |
1186 | remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end, | |
24b6d416 | 1187 | struct vmem_altmap *altmap, bool direct) |
f2a6a705 KS |
1188 | { |
1189 | unsigned long next, pages = 0; | |
1190 | pud_t *pud_base; | |
1191 | p4d_t *p4d; | |
1192 | ||
1193 | p4d = p4d_start + p4d_index(addr); | |
1194 | for (; addr < end; addr = next, p4d++) { | |
1195 | next = p4d_addr_end(addr, end); | |
1196 | ||
1197 | if (!p4d_present(*p4d)) | |
1198 | continue; | |
1199 | ||
1200 | BUILD_BUG_ON(p4d_large(*p4d)); | |
1201 | ||
e6ab9c4d | 1202 | pud_base = pud_offset(p4d, 0); |
24b6d416 | 1203 | remove_pud_table(pud_base, addr, next, altmap, direct); |
98fe3633 JG |
1204 | /* |
1205 | * For 4-level page tables we do not want to free PUDs, but in the | |
1206 | * 5-level case we should free them. This code will have to change | |
1207 | * to adapt for boot-time switching between 4 and 5 level page tables. | |
1208 | */ | |
ed7588d5 | 1209 | if (pgtable_l5_enabled()) |
a7e6c701 | 1210 | free_pud_table(pud_base, p4d); |
f2a6a705 KS |
1211 | } |
1212 | ||
1213 | if (direct) | |
1214 | update_page_count(PG_LEVEL_512G, -pages); | |
1215 | } | |
1216 | ||
ae9aae9e WC |
1217 | /* start and end are both virtual addresses. */ |
1218 | static void __meminit | |
24b6d416 CH |
1219 | remove_pagetable(unsigned long start, unsigned long end, bool direct, |
1220 | struct vmem_altmap *altmap) | |
ae9aae9e WC |
1221 | { |
1222 | unsigned long next; | |
5255e0a7 | 1223 | unsigned long addr; |
ae9aae9e | 1224 | pgd_t *pgd; |
f2a6a705 | 1225 | p4d_t *p4d; |
ae9aae9e | 1226 | |
5255e0a7 YI |
1227 | for (addr = start; addr < end; addr = next) { |
1228 | next = pgd_addr_end(addr, end); | |
ae9aae9e | 1229 | |
5255e0a7 | 1230 | pgd = pgd_offset_k(addr); |
ae9aae9e WC |
1231 | if (!pgd_present(*pgd)) |
1232 | continue; | |
1233 | ||
e6ab9c4d | 1234 | p4d = p4d_offset(pgd, 0); |
24b6d416 | 1235 | remove_p4d_table(p4d, addr, next, altmap, direct); |
ae9aae9e WC |
1236 | } |
1237 | ||
ae9aae9e WC |
1238 | flush_tlb_all(); |
1239 | } | |
1240 | ||
24b6d416 CH |
1241 | void __ref vmemmap_free(unsigned long start, unsigned long end, |
1242 | struct vmem_altmap *altmap) | |
0197518c | 1243 | { |
e19d1126 FK |
1244 | VM_BUG_ON(!PAGE_ALIGNED(start)); |
1245 | VM_BUG_ON(!PAGE_ALIGNED(end)); | |
8e2df191 | 1246 | |
24b6d416 | 1247 | remove_pagetable(start, end, false, altmap); |
0197518c TC |
1248 | } |
1249 | ||
bbcab878 TC |
1250 | static void __meminit |
1251 | kernel_physical_mapping_remove(unsigned long start, unsigned long end) | |
1252 | { | |
1253 | start = (unsigned long)__va(start); | |
1254 | end = (unsigned long)__va(end); | |
1255 | ||
24b6d416 | 1256 | remove_pagetable(start, end, true, NULL); |
bbcab878 TC |
1257 | } |
1258 | ||
65a2aa5f | 1259 | void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) |
24d335ca WC |
1260 | { |
1261 | unsigned long start_pfn = start >> PAGE_SHIFT; | |
1262 | unsigned long nr_pages = size >> PAGE_SHIFT; | |
24d335ca | 1263 | |
feee6b29 | 1264 | __remove_pages(start_pfn, nr_pages, altmap); |
4b94ffdc | 1265 | kernel_physical_mapping_remove(start, start + size); |
24d335ca | 1266 | } |
45e0b78b KM |
1267 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
1268 | ||
81ac3ad9 | 1269 | static struct kcore_list kcore_vsyscall; |
1da177e4 | 1270 | |
94b43c3d YL |
1271 | static void __init register_page_bootmem_info(void) |
1272 | { | |
47010c04 | 1273 | #if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) |
94b43c3d YL |
1274 | int i; |
1275 | ||
1276 | for_each_online_node(i) | |
1277 | register_page_bootmem_info_node(NODE_DATA(i)); | |
1278 | #endif | |
1279 | } | |
1280 | ||
6eb82f99 JR |
1281 | /* |
1282 | * Pre-allocates page-table pages for the vmalloc area in the kernel page-table. | |
1283 | * Only the level which needs to be synchronized between all page-tables is | |
1284 | * allocated because the synchronization can be expensive. | |
1285 | */ | |
1286 | static void __init preallocate_vmalloc_pages(void) | |
1287 | { | |
1288 | unsigned long addr; | |
1289 | const char *lvl; | |
1290 | ||
1a167ddd | 1291 | for (addr = VMALLOC_START; addr <= VMEMORY_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) { |
6eb82f99 JR |
1292 | pgd_t *pgd = pgd_offset_k(addr); |
1293 | p4d_t *p4d; | |
1294 | pud_t *pud; | |
1295 | ||
995909a4 JR |
1296 | lvl = "p4d"; |
1297 | p4d = p4d_alloc(&init_mm, pgd, addr); | |
1298 | if (!p4d) | |
1299 | goto failed; | |
6eb82f99 JR |
1300 | |
1301 | if (pgtable_l5_enabled()) | |
1302 | continue; | |
1303 | ||
7a27ef5e JR |
1304 | /* |
1305 | * The goal here is to allocate all possibly required | |
1306 | * hardware page tables pointed to by the top hardware | |
1307 | * level. | |
1308 | * | |
1309 | * On 4-level systems, the P4D layer is folded away and | |
1310 | * the above code does no preallocation. Below, go down | |
1311 | * to the pud _software_ level to ensure the second | |
1312 | * hardware level is allocated on 4-level systems too. | |
1313 | */ | |
995909a4 JR |
1314 | lvl = "pud"; |
1315 | pud = pud_alloc(&init_mm, p4d, addr); | |
1316 | if (!pud) | |
1317 | goto failed; | |
6eb82f99 JR |
1318 | } |
1319 | ||
1320 | return; | |
1321 | ||
1322 | failed: | |
1323 | ||
1324 | /* | |
1325 | * The pages have to be there now or they will be missing in | |
1326 | * process page-tables later. | |
1327 | */ | |
1328 | panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl); | |
1329 | } | |
1330 | ||
1da177e4 LT |
1331 | void __init mem_init(void) |
1332 | { | |
0dc243ae | 1333 | pci_iommu_alloc(); |
1da177e4 | 1334 | |
48ddb154 | 1335 | /* clear_bss() already cleared the empty_zero_page */ |
1da177e4 | 1336 | |
bced0e32 | 1337 | /* this will put all memory onto the freelists */ |
c6ffc5ca | 1338 | memblock_free_all(); |
1da177e4 | 1339 | after_bootmem = 1; |
6f84f8d1 | 1340 | x86_init.hyper.init_after_bootmem(); |
1da177e4 | 1341 | |
353b1e7b PT |
1342 | /* |
1343 | * Must be done after boot memory is put on freelist, because here we | |
1344 | * might set fields in deferred struct pages that have not yet been | |
c6ffc5ca | 1345 | * initialized, and memblock_free_all() initializes all the reserved |
353b1e7b PT |
1346 | * deferred pages for us. |
1347 | */ | |
1348 | register_page_bootmem_info(); | |
1349 | ||
1da177e4 | 1350 | /* Register memory areas for /proc/kcore */ |
cd026ca2 JZ |
1351 | if (get_gate_vma(&init_mm)) |
1352 | kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); | |
1da177e4 | 1353 | |
6eb82f99 | 1354 | preallocate_vmalloc_pages(); |
1da177e4 LT |
1355 | } |
1356 | ||
ecd09650 DJ |
1357 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
1358 | int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask) | |
1359 | { | |
1360 | /* | |
1361 | * More CPUs always led to greater speedups on tested systems, up to | |
1362 | * all the nodes' CPUs. Use all since the system is otherwise idle | |
1363 | * now. | |
1364 | */ | |
1365 | return max_t(int, cpumask_weight(node_cpumask), 1); | |
1366 | } | |
1367 | #endif | |
1368 | ||
502f6604 | 1369 | int kernel_set_to_readonly; |
16239630 | 1370 | |
67df197b AV |
1371 | void mark_rodata_ro(void) |
1372 | { | |
74e08179 | 1373 | unsigned long start = PFN_ALIGN(_text); |
fc8d7826 | 1374 | unsigned long rodata_start = PFN_ALIGN(__start_rodata); |
2d0004d1 KC |
1375 | unsigned long end = (unsigned long)__end_rodata_hpage_align; |
1376 | unsigned long text_end = PFN_ALIGN(_etext); | |
1377 | unsigned long rodata_end = PFN_ALIGN(__end_rodata); | |
45e2a9d4 | 1378 | unsigned long all_end; |
8f0f996e | 1379 | |
6fb14755 | 1380 | printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", |
e3ebadd9 | 1381 | (end - start) >> 10); |
984bb80d AV |
1382 | set_memory_ro(start, (end - start) >> PAGE_SHIFT); |
1383 | ||
16239630 SR |
1384 | kernel_set_to_readonly = 1; |
1385 | ||
984bb80d | 1386 | /* |
72212675 YL |
1387 | * The rodata/data/bss/brk section (but not the kernel text!) |
1388 | * should also be not-executable. | |
45e2a9d4 KC |
1389 | * |
1390 | * We align all_end to PMD_SIZE because the existing mapping | |
1391 | * is a full PMD. If we would align _brk_end to PAGE_SIZE we | |
1392 | * split the PMD and the reminder between _brk_end and the end | |
1393 | * of the PMD will remain mapped executable. | |
1394 | * | |
1395 | * Any PMD which was setup after the one which covers _brk_end | |
1396 | * has been zapped already via cleanup_highmem(). | |
984bb80d | 1397 | */ |
45e2a9d4 | 1398 | all_end = roundup((unsigned long)_brk_end, PMD_SIZE); |
ab76f7b4 | 1399 | set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT); |
67df197b | 1400 | |
59566b0b SRV |
1401 | set_ftrace_ops_ro(); |
1402 | ||
0c42f392 | 1403 | #ifdef CONFIG_CPA_DEBUG |
10f22dde | 1404 | printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end); |
6d238cc4 | 1405 | set_memory_rw(start, (end-start) >> PAGE_SHIFT); |
0c42f392 | 1406 | |
10f22dde | 1407 | printk(KERN_INFO "Testing CPA: again\n"); |
6d238cc4 | 1408 | set_memory_ro(start, (end-start) >> PAGE_SHIFT); |
0c42f392 | 1409 | #endif |
74e08179 | 1410 | |
5494c3a6 KC |
1411 | free_kernel_image_pages("unused kernel image (text/rodata gap)", |
1412 | (void *)text_end, (void *)rodata_start); | |
1413 | free_kernel_image_pages("unused kernel image (rodata/data gap)", | |
1414 | (void *)rodata_end, (void *)_sdata); | |
e1a58320 SS |
1415 | |
1416 | debug_checkwx(); | |
67df197b | 1417 | } |
4e4eee0e | 1418 | |
078eb6aa PT |
1419 | /* |
1420 | * Block size is the minimum amount of memory which can be hotplugged or | |
1421 | * hotremoved. It must be a power of two and must be equal to or larger than | |
1422 | * MIN_MEMORY_BLOCK_SIZE. | |
1423 | */ | |
1424 | #define MAX_BLOCK_SIZE (2UL << 30) | |
1425 | ||
1426 | /* Amount of ram needed to start using large blocks */ | |
1427 | #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30) | |
1428 | ||
f642fb58 | 1429 | /* Adjustable memory block size */ |
1430 | static unsigned long set_memory_block_size; | |
1431 | int __init set_memory_block_size_order(unsigned int order) | |
1432 | { | |
1433 | unsigned long size = 1UL << order; | |
1434 | ||
1435 | if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE) | |
1436 | return -EINVAL; | |
1437 | ||
1438 | set_memory_block_size = size; | |
1439 | return 0; | |
1440 | } | |
1441 | ||
982792c7 | 1442 | static unsigned long probe_memory_block_size(void) |
1dc41aa6 | 1443 | { |
078eb6aa PT |
1444 | unsigned long boot_mem_end = max_pfn << PAGE_SHIFT; |
1445 | unsigned long bz; | |
982792c7 | 1446 | |
f642fb58 | 1447 | /* If memory block size has been set, then use it */ |
1448 | bz = set_memory_block_size; | |
1449 | if (bz) | |
078eb6aa | 1450 | goto done; |
982792c7 | 1451 | |
078eb6aa PT |
1452 | /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */ |
1453 | if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) { | |
1454 | bz = MIN_MEMORY_BLOCK_SIZE; | |
1455 | goto done; | |
1456 | } | |
1457 | ||
fe124c95 DJ |
1458 | /* |
1459 | * Use max block size to minimize overhead on bare metal, where | |
1460 | * alignment for memory hotplug isn't a concern. | |
1461 | */ | |
1462 | if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) { | |
1463 | bz = MAX_BLOCK_SIZE; | |
1464 | goto done; | |
1465 | } | |
1466 | ||
078eb6aa PT |
1467 | /* Find the largest allowed block size that aligns to memory end */ |
1468 | for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) { | |
1469 | if (IS_ALIGNED(boot_mem_end, bz)) | |
1470 | break; | |
1471 | } | |
1472 | done: | |
43c75f93 | 1473 | pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20); |
982792c7 YL |
1474 | |
1475 | return bz; | |
1476 | } | |
1477 | ||
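
probe_memory_block_size() scans downward from MAX_BLOCK_SIZE for the largest power-of-two block size that divides the end of boot memory, so hotplug block boundaries line up with what is actually populated. The same search as a standalone program (RAM size and limits are toy values):

```c
#include <stdio.h>

#define MIN_BLOCK (128UL << 20) /* stand-in for MIN_MEMORY_BLOCK_SIZE */
#define MAX_BLOCK (2UL << 30)   /* MAX_BLOCK_SIZE                     */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static unsigned long probe(unsigned long boot_mem_end)
{
        unsigned long bz;

        for (bz = MAX_BLOCK; bz > MIN_BLOCK; bz >>= 1)
                if (IS_ALIGNED(boot_mem_end, bz))
                        break;
        return bz;      /* falls back to MIN_BLOCK when nothing larger fits */
}

int main(void)
{
        /* 7.75 GiB of RAM: end is 256 MiB aligned but not 512 MiB aligned. */
        unsigned long end = (7UL << 30) + (768UL << 20);

        printf("block size: %luMB\n", probe(end) >> 20);        /* 256MB */
        return 0;
}
```
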
1478 | static unsigned long memory_block_size_probed; | |
1479 | unsigned long memory_block_size_bytes(void) | |
1480 | { | |
1481 | if (!memory_block_size_probed) | |
1482 | memory_block_size_probed = probe_memory_block_size(); | |
1483 | ||
1484 | return memory_block_size_probed; | |
1485 | } | |
1486 | ||
0889eba5 CL |
1487 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
1488 | /* | |
1489 | * Initialise the sparsemem vmemmap using huge-pages at the PMD level. | |
1490 | */ | |
c2b91e2e YL |
1491 | static long __meminitdata addr_start, addr_end; |
1492 | static void __meminitdata *p_start, *p_end; | |
1493 | static int __meminitdata node_start; | |
1494 | ||
2045a3b8 FC |
1495 | void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node, |
1496 | unsigned long addr, unsigned long next) | |
0889eba5 | 1497 | { |
2045a3b8 FC |
1498 | pte_t entry; |
1499 | ||
1500 | entry = pfn_pte(__pa(p) >> PAGE_SHIFT, | |
1501 | PAGE_KERNEL_LARGE); | |
1502 | set_pmd(pmd, __pmd(pte_val(entry))); | |
1503 | ||
1504 | /* check to see if we have contiguous blocks */ | |
1505 | if (p_end != p || node_start != node) { | |
1506 | if (p_start) | |
1507 | pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n", | |
1508 | addr_start, addr_end-1, p_start, p_end-1, node_start); | |
1509 | addr_start = addr; | |
1510 | node_start = node; | |
1511 | p_start = p; | |
1512 | } | |
8e2cdbcb | 1513 | |
2045a3b8 FC |
1514 | addr_end = addr + PMD_SIZE; |
1515 | p_end = p + PMD_SIZE; | |
7c934d39 | 1516 | |
2045a3b8 FC |
1517 | if (!IS_ALIGNED(addr, PMD_SIZE) || |
1518 | !IS_ALIGNED(next, PMD_SIZE)) | |
1519 | vmemmap_use_new_sub_pmd(addr, next); | |
1520 | } | |
8d400913 | 1521 | |
2045a3b8 FC |
1522 | int __meminit vmemmap_check_pmd(pmd_t *pmd, int node, |
1523 | unsigned long addr, unsigned long next) | |
1524 | { | |
1525 | int large = pmd_large(*pmd); | |
8d400913 | 1526 | |
2045a3b8 FC |
1527 | if (large) { |
1528 | vmemmap_verify((pte_t *)pmd, node, addr, next); | |
1529 | vmemmap_use_sub_pmd(addr, next); | |
0889eba5 | 1530 | } |
2045a3b8 FC |
1531 | |
1532 | return large; | |
0889eba5 | 1533 | } |
c2b91e2e | 1534 | |
7b73d978 CH |
1535 | int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, |
1536 | struct vmem_altmap *altmap) | |
e8216da5 JW |
1537 | { |
1538 | int err; | |
1539 | ||
e19d1126 FK |
1540 | VM_BUG_ON(!PAGE_ALIGNED(start)); |
1541 | VM_BUG_ON(!PAGE_ALIGNED(end)); | |
8e2df191 | 1542 | |
2d7a2171 | 1543 | if (end - start < PAGES_PER_SECTION * sizeof(struct page)) |
1d9cfee7 | 1544 | err = vmemmap_populate_basepages(start, end, node, NULL); |
e9c0a3f0 | 1545 | else if (boot_cpu_has(X86_FEATURE_PSE)) |
4b94ffdc DW |
1546 | err = vmemmap_populate_hugepages(start, end, node, altmap); |
1547 | else if (altmap) { | |
1548 | pr_err_once("%s: no cpu support for altmap allocations\n", | |
1549 | __func__); | |
1550 | err = -ENOMEM; | |
1551 | } else | |
1d9cfee7 | 1552 | err = vmemmap_populate_basepages(start, end, node, NULL); |
e8216da5 | 1553 | if (!err) |
5372e155 | 1554 | sync_global_pgds(start, end - 1); |
e8216da5 JW |
1555 | return err; |
1556 | } | |
1557 | ||
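
vmemmap_populate() picks its strategy in order: base pages for ranges smaller than one section's memmap, PMD huge pages when the CPU has PSE, and base pages again as the fallback; altmap allocations require the huge-page path. A condensed sketch of the dispatch (the section-size constant is illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

/* PAGES_PER_SECTION * sizeof(struct page); value is illustrative. */
#define SECTION_MEMMAP_BYTES (32768UL * 64)

static const char *pick_strategy(unsigned long start, unsigned long end,
                                 bool cpu_has_pse, bool have_altmap)
{
        if (end - start < SECTION_MEMMAP_BYTES)
                return "basepages (sub-section range)";
        if (cpu_has_pse)
                return "hugepages (PMD mappings)";
        if (have_altmap)
                return "error: altmap needs hugepage support";
        return "basepages (fallback)";
}

int main(void)
{
        printf("%s\n", pick_strategy(0, 1 << 20, true, false));
        printf("%s\n", pick_strategy(0, 8 << 20, true, true));
        printf("%s\n", pick_strategy(0, 8 << 20, false, true));
        return 0;
}
```
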
426e5c42 | 1558 | #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE |
46723bfa | 1559 | void register_page_bootmem_memmap(unsigned long section_nr, |
15670bfe | 1560 | struct page *start_page, unsigned long nr_pages) |
46723bfa YI |
1561 | { |
1562 | unsigned long addr = (unsigned long)start_page; | |
15670bfe | 1563 | unsigned long end = (unsigned long)(start_page + nr_pages); |
46723bfa YI |
1564 | unsigned long next; |
1565 | pgd_t *pgd; | |
f2a6a705 | 1566 | p4d_t *p4d; |
46723bfa YI |
1567 | pud_t *pud; |
1568 | pmd_t *pmd; | |
15670bfe | 1569 | unsigned int nr_pmd_pages; |
46723bfa YI |
1570 | struct page *page; |
1571 | ||
1572 | for (; addr < end; addr = next) { | |
1573 | pte_t *pte = NULL; | |
1574 | ||
1575 | pgd = pgd_offset_k(addr); | |
1576 | if (pgd_none(*pgd)) { | |
1577 | next = (addr + PAGE_SIZE) & PAGE_MASK; | |
1578 | continue; | |
1579 | } | |
1580 | get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO); | |
1581 | ||
f2a6a705 KS |
1582 | p4d = p4d_offset(pgd, addr); |
1583 | if (p4d_none(*p4d)) { | |
1584 | next = (addr + PAGE_SIZE) & PAGE_MASK; | |
1585 | continue; | |
1586 | } | |
1587 | get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO); | |
1588 | ||
1589 | pud = pud_offset(p4d, addr); | |
46723bfa YI |
1590 | if (pud_none(*pud)) { |
1591 | next = (addr + PAGE_SIZE) & PAGE_MASK; | |
1592 | continue; | |
1593 | } | |
1594 | get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO); | |
1595 | ||
2d7a2171 | 1596 | if (!boot_cpu_has(X86_FEATURE_PSE)) { |
46723bfa YI |
1597 | next = (addr + PAGE_SIZE) & PAGE_MASK; |
1598 | pmd = pmd_offset(pud, addr); | |
1599 | if (pmd_none(*pmd)) | |
1600 | continue; | |
1601 | get_page_bootmem(section_nr, pmd_page(*pmd), | |
1602 | MIX_SECTION_INFO); | |
1603 | ||
1604 | pte = pte_offset_kernel(pmd, addr); | |
1605 | if (pte_none(*pte)) | |
1606 | continue; | |
1607 | get_page_bootmem(section_nr, pte_page(*pte), | |
1608 | SECTION_INFO); | |
1609 | } else { | |
1610 | next = pmd_addr_end(addr, end); | |
1611 | ||
1612 | pmd = pmd_offset(pud, addr); | |
1613 | if (pmd_none(*pmd)) | |
1614 | continue; | |
1615 | ||
15670bfe | 1616 | nr_pmd_pages = 1 << get_order(PMD_SIZE); |
46723bfa | 1617 | page = pmd_page(*pmd); |
15670bfe | 1618 | while (nr_pmd_pages--) |
46723bfa YI |
1619 | get_page_bootmem(section_nr, page++, |
1620 | SECTION_INFO); | |
1621 | } | |
1622 | } | |
1623 | } | |
1624 | #endif | |
1625 | ||
c2b91e2e YL |
1626 | void __meminit vmemmap_populate_print_last(void) |
1627 | { | |
1628 | if (p_start) { | |
c9cdaeb2 | 1629 | pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n", |
c2b91e2e YL |
1630 | addr_start, addr_end-1, p_start, p_end-1, node_start); |
1631 | p_start = NULL; | |
1632 | p_end = NULL; | |
1633 | node_start = 0; | |
1634 | } | |
1635 | } | |
0889eba5 | 1636 | #endif |