// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
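
/*
 * For illustration, a rough sketch of how the generic SPARSEMEM_VMEMMAP
 * memory model turns these primitives into a base-offset calculation
 * (VMEMMAP_START being the architecture-chosen virtual base of the map):
 *
 *	#define vmemmap			((struct page *)VMEMMAP_START)
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 *
 * No memory access is needed to translate between a pfn and its struct page.
 */
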
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/bootmem_info.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @nr_walked:		the number of PTEs walked.
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed
 *			or are mapped from.
 */
struct vmemmap_remap_walk {
	void (*remap_pte)(pte_t *pte, unsigned long addr,
			  struct vmemmap_remap_walk *walk);
	unsigned long nr_walked;
	struct page *reuse_page;
	unsigned long reuse_addr;
	struct list_head *vmemmap_pages;
};

static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	pmd_t __pmd;
	int i;
	unsigned long addr = start;
	struct page *page = pmd_page(*pmd);
	pte_t *pgtable = pte_alloc_one_kernel(&init_mm);

	if (!pgtable)
		return -ENOMEM;

	pmd_populate_kernel(&init_mm, &__pmd, pgtable);

	for (i = 0; i < PMD_SIZE / PAGE_SIZE; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		pgprot_t pgprot = PAGE_KERNEL;

		entry = mk_pte(page + i, pgprot);
		pte = pte_offset_kernel(&__pmd, addr);
		set_pte_at(&init_mm, addr, pte, entry);
	}

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_leaf(*pmd))) {
		/* Make pte visible before pmd. See comment in pmd_install(). */
		smp_wmb();
		pmd_populate_kernel(&init_mm, pmd, pgtable);
		flush_tlb_kernel_range(start, start + PMD_SIZE);
	} else {
		pte_free_kernel(&init_mm, pgtable);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	int leaf;

	spin_lock(&init_mm.page_table_lock);
	leaf = pmd_leaf(*pmd);
	spin_unlock(&init_mm.page_table_lock);

	if (!leaf)
		return 0;

	return __split_vmemmap_huge_pmd(pmd, start);
}

static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse_page is found 'first' in the table walk, before we start
	 * remapping (i.e. before calling @walk->remap_pte).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(*pte);
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
		walk->nr_walked++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++) {
		walk->remap_pte(pte, addr, walk);
		walk->nr_walked++;
	}
}

static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		int ret;

		ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK);
		if (ret)
			return ret;

		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		int ret;

		next = pud_addr_end(addr, end);
		ret = vmemmap_pmd_range(pud, addr, next, walk);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);

	return 0;
}

static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		int ret;

		next = p4d_addr_end(addr, end);
		ret = vmemmap_pud_range(p4d, addr, next, walk);
		if (ret)
			return ret;
	} while (p4d++, addr = next, addr != end);

	return 0;
}

static int vmemmap_remap_range(unsigned long start, unsigned long end,
			       struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));

	pgd = pgd_offset_k(addr);
	do {
		int ret;

		next = pgd_addr_end(addr, end);
		ret = vmemmap_p4d_range(pgd, addr, next, walk);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);

	/*
	 * We only change the mapping of the vmemmap virtual address range
	 * [@start + PAGE_SIZE, end), so we only need to flush the TLB which
	 * belongs to the range.
	 */
	flush_tlb_kernel_range(start + PAGE_SIZE, end);

	return 0;
}

/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or the buddy allocator. If the PG_reserved flag is set, it means
 * that it was allocated from the memblock allocator; free it via
 * free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}

/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		free_vmemmap_page(page);
	}
}

static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operations
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	pte_t entry = mk_pte(walk->reuse_page, pgprot);
	struct page *page = pte_page(*pte);

	list_add_tail(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}

/*
 * How many struct page structs need to be reset. When we reuse the head
 * struct page, its special metadata (e.g. page->flags or page->mapping)
 * must not be copied to the tail struct page structs. Such invalid values
 * would be caught by free_tail_pages_check(). To avoid its "corrupted
 * mapping in tail page" message, we need to reset at least 3 struct page
 * structs (one head struct page and two tail struct pages).
 */
#define NR_RESET_STRUCT_PAGE	3

static inline void reset_struct_pages(struct page *start)
{
	int i;
	struct page *from = start + NR_RESET_STRUCT_PAGE;

	for (i = 0; i < NR_RESET_STRUCT_PAGE; i++)
		memcpy(start + i, from, sizeof(*from));
}

static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(*pte) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
	to = page_to_virt(page);
	copy_page(to, (void *)walk->reuse_addr);
	reset_struct_pages(to);

	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}

/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range was mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int vmemmap_remap_free(unsigned long start, unsigned long end,
		       unsigned long reuse)
{
	int ret;
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/*
	 * To make the remapping routine as efficient as possible for huge
	 * pages, the vmemmap page table walking routine follows these rules
	 * (see vmemmap_pte_range() for more details):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   must be contiguous.
	 * - The @reuse address is part of the range [@reuse, @end) that is
	 *   passed to vmemmap_remap_range() and walked.
	 * - The @reuse address is the first in the complete range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);

	mmap_read_lock(&init_mm);
	ret = vmemmap_remap_range(reuse, end, &walk);
	if (ret && walk.nr_walked) {
		end = reuse + walk.nr_walked * PAGE_SIZE;
		/*
		 * vmemmap_pages contains pages from the previous
		 * vmemmap_remap_range call which failed.  These
		 * are pages which were removed from the vmemmap.
		 * They will be restored in the following call.
		 */
		walk = (struct vmemmap_remap_walk) {
			.remap_pte	= vmemmap_restore_pte,
			.reuse_addr	= reuse,
			.vmemmap_pages	= &vmemmap_pages,
		};

		vmemmap_remap_range(reuse, end, &walk);
	}
	mmap_read_unlock(&init_mm);

	free_vmemmap_page_list(&vmemmap_pages);

	return ret;
}

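/*
 * Usage sketch for vmemmap_remap_free() (illustrative only; the variable
 * names are hypothetical, not taken from any caller).  A HugeTLB-style user
 * that wants to keep a single vmemmap page and remap the rest of a huge
 * page's vmemmap onto it would do roughly:
 *
 *	unsigned long reuse = head_vmemmap_addr;
 *	unsigned long start = reuse + PAGE_SIZE;
 *	unsigned long end   = head_vmemmap_addr + vmemmap_size;
 *
 *	if (vmemmap_remap_free(start, end, reuse))
 *		return;
 *
 * start must be reuse + PAGE_SIZE to satisfy the BUG_ON() above.  On success
 * the pages that backed [start, end) have been freed and the whole range now
 * reads through the single page at reuse, mapped read-only.
 */
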
static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   gfp_t gfp_mask, struct list_head *list)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_pages(page, 0);
	return -ENOMEM;
}

/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, @end)
 *			 to pages taken from @vmemmap_pages, one page for each
 *			 vmemmap page in the range.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 * @gfp_mask:	GFP flag for allocating vmemmap pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			unsigned long reuse, gfp_t gfp_mask)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
		return -ENOMEM;

	mmap_read_lock(&init_mm);
	vmemmap_remap_range(reuse, end, &walk);
	mmap_read_unlock(&init_mm);

	return 0;
}
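
/*
 * Sketch of the inverse direction (illustrative only): vmemmap_remap_alloc()
 * undoes a previous vmemmap_remap_free().  One fresh page per vmemmap page in
 * [start, end) is allocated via alloc_vmemmap_page_list(), the contents of
 * the reuse page are copied into each new page (with the first few struct
 * pages reset), and the range is remapped writable onto the new pages.  A
 * caller would typically reuse the same start, end and reuse values it passed
 * to vmemmap_remap_free(), plus a GFP mask of its choosing, e.g.:
 *
 *	if (vmemmap_remap_alloc(start, end, reuse, GFP_KERNEL))
 *		return -ENOMEM;
 */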
#endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* need to make sure size is all the same during early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
			     __func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

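/*
 * Worked example for altmap_alloc_block_buf() above (the numbers are made up
 * for illustration): suppose the next free pfn in the altmap is 0x1003 and a
 * 16-page block is requested.  nr_pfns is 16, so nr_align starts as 16 (the
 * lowest set bit of nr_pfns) and then becomes ALIGN(0x1003, 16) - 0x1003 = 13
 * skipped pfns.  Provided the altmap still has at least 16 + 13 free pfns,
 * the block is placed at pfn 0x1010 and altmap->alloc/->align advance by 16
 * and 13 respectively.
 */
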
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node, altmap);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return pfn_to_page(pfn);
}
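
/*
 * Sizing note (illustrative figures for a common configuration, e.g. x86-64
 * defaults with 4 KiB pages, 128 MiB sections and a 64-byte struct page):
 * a section covers 32768 pages, so its memmap occupies 32768 * 64 bytes =
 * 2 MiB of vmemmap.  That is why an architecture's vmemmap_populate() can
 * often back a whole section with a single PMD-sized mapping instead of the
 * base pages set up here.
 */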