// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
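
/*
 * Illustration (not code from this file): with CONFIG_SPARSEMEM_VMEMMAP
 * the generic helpers in include/asm-generic/memory_model.h reduce to
 * pure offset arithmetic against the vmemmap base, roughly:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */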
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* Allocation sizes need to be consistent during the early boot stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

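/*
 * Illustrative caller (a sketch modeled on the x86-64 huge-page populate
 * path; not code from this file): architectures typically try to grab a
 * PMD-sized buffer and map it as one leaf entry, falling back to base
 * pages when the allocation fails:
 *
 *	void *p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
 *	if (p)
 *		set_pmd(pmd, pfn_pmd(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
 *	else if (vmemmap_populate_basepages(addr, next, node, altmap))
 *		return -ENOMEM;
 */
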
/* First pfn after everything the altmap has handed out so far */
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

/* Number of pfns still available in the altmap reservation */
static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	/*
	 * Naturally align the allocation: round pfn up to the largest
	 * power of two that divides nr_pfns, and account for the skipped
	 * pfns as alignment padding.
	 */
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

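/*
 * Worked example with hypothetical numbers: if base_pfn = 0x100000,
 * reserve = 0, alloc = 2 and align = 0, the next free pfn is 0x100002.
 * A 4-pfn request computes nr_align = 4 (the largest power of two
 * dividing 4), rounds the start up to pfn 0x100004, and records
 * alloc += 4, align += 2, consuming 6 pfns of the reservation.
 */
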
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn_once("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap,
				       struct page *reuse)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		if (!reuse) {
			p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
			if (!p)
				return NULL;
		} else {
			/*
			 * When a PTE/PMD entry is freed from the init_mm
			 * there's a free_pages() call on the page allocated
			 * above. Thus this get_page() is paired with the
			 * put_page_testzero() on the freeing path.
			 * This can only be called by certain ZONE_DEVICE
			 * paths, and through vmemmap_populate_compound_pages()
			 * when slab is available.
			 */
			get_page(reuse);
			p = page_to_virt(reuse);
		}
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
						  struct vmem_altmap *altmap,
						  struct page *reuse)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = vmemmap_pgd_populate(addr, node);
	if (!pgd)
		return NULL;
	p4d = vmemmap_p4d_populate(pgd, addr, node);
	if (!p4d)
		return NULL;
	pud = vmemmap_pud_populate(p4d, addr, node);
	if (!pud)
		return NULL;
	pmd = vmemmap_pmd_populate(pud, addr, node);
	if (!pmd)
		return NULL;
	pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse);
	if (!pte)
		return NULL;
	vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

	return pte;
}

static int __meminit vmemmap_populate_range(unsigned long start,
					    unsigned long end, int node,
					    struct vmem_altmap *altmap,
					    struct page *reuse)
{
	unsigned long addr = start;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pte = vmemmap_populate_address(addr, node, altmap, reuse);
		if (!pte)
			return -ENOMEM;
	}

	return 0;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	return vmemmap_populate_range(start, end, node, altmap, NULL);
}

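/*
 * Illustration (an assumption about arch code, not part of this file):
 * architectures without a huge-page vmemmap can satisfy the required
 * vmemmap_populate() hook with a one-liner, e.g.
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node, altmap);
 *	}
 */
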
/*
 * For compound pages bigger than section size (e.g. x86 1G compound
 * pages with 2M subsection size) fill the rest of sections as tail
 * pages.
 *
 * Note that memremap_pages() resets @nr_range value and will increment
 * it after each successfully onlined range. Thus the value of @nr_range
 * at section memmap populate time corresponds to the in-progress range
 * being onlined here.
 */
static bool __meminit reuse_compound_section(unsigned long start_pfn,
					     struct dev_pagemap *pgmap)
{
	unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
	unsigned long offset = start_pfn -
		PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);

	return !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION;
}

static pte_t * __meminit compound_section_tail_page(unsigned long addr)
{
	pte_t *pte;

	addr -= PAGE_SIZE;

	/*
	 * Assuming sections are populated sequentially, the previous section's
	 * page data can be reused.
	 */
	pte = pte_offset_kernel(pmd_off_k(addr), addr);
	if (!pte)
		return NULL;

	return pte;
}

static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
						     unsigned long start,
						     unsigned long end, int node,
						     struct dev_pagemap *pgmap)
{
	unsigned long size, addr;
	pte_t *pte;
	int rc;

	if (reuse_compound_section(start_pfn, pgmap)) {
		pte = compound_section_tail_page(start);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the page that was populated in the prior iteration
		 * with just tail struct pages.
		 */
		return vmemmap_populate_range(start, end, node, NULL,
					      pte_page(*pte));
	}

	size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));
	for (addr = start; addr < end; addr += size) {
		unsigned long next, last = addr + size;

		/* Populate the head page vmemmap page */
		pte = vmemmap_populate_address(addr, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/* Populate the tail pages vmemmap page */
		next = addr + PAGE_SIZE;
		pte = vmemmap_populate_address(next, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the previous page for the rest of tail pages
		 * See layout diagram in Documentation/mm/vmemmap_dedup.rst
		 */
		next += PAGE_SIZE;
		rc = vmemmap_populate_range(next, last, node, NULL,
					    pte_page(*pte));
		if (rc)
			return -ENOMEM;
	}

	return 0;
}

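/*
 * Worked example (assuming 4K PAGE_SIZE and a 64-byte struct page):
 * a 2M compound page spans 512 pfns, i.e. 512 * 64 = 32K of memmap,
 * which needs 8 vmemmap pages. The loop above populates one page for
 * the head plus the first tail page, then remaps the remaining six
 * vmemmap pages onto that tail page, saving 6/8 of the backing memory.
 */
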
struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);
	int r;

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (is_power_of_2(sizeof(struct page)) &&
	    pgmap && pgmap_vmemmap_nr(pgmap) > 1 && !altmap)
		r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);
	else
		r = vmemmap_populate(start, end, nid, altmap);

	if (r < 0)
		return NULL;

	return pfn_to_page(pfn);
}
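
/*
 * Usage note (a sketch of the call path, based on mm/sparse.c): memory
 * hot-add and ZONE_DEVICE reach this function via section_activate() ->
 * populate_section_memmap(), one memory (sub)section at a time.
 */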