Commit | Line | Data |
---|---|---|
14cf11af PM |
1 | /* |
2 | * PowerPC version | |
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | |
4 | * | |
5 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | |
6 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | |
7 | * Copyright (C) 1996 Paul Mackerras | |
14cf11af PM |
8 | * |
9 | * Derived from "arch/i386/mm/init.c" | |
10 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | |
11 | * | |
12 | * Dave Engebretsen <engebret@us.ibm.com> | |
13 | * Rework for PPC64 port. | |
14 | * | |
15 | * This program is free software; you can redistribute it and/or | |
16 | * modify it under the terms of the GNU General Public License | |
17 | * as published by the Free Software Foundation; either version | |
18 | * 2 of the License, or (at your option) any later version. | |
19 | * | |
20 | */ | |
21 | ||
cec08e7a BH |
22 | #undef DEBUG |
23 | ||
14cf11af PM |
24 | #include <linux/signal.h> |
25 | #include <linux/sched.h> | |
26 | #include <linux/kernel.h> | |
27 | #include <linux/errno.h> | |
28 | #include <linux/string.h> | |
29 | #include <linux/types.h> | |
30 | #include <linux/mman.h> | |
31 | #include <linux/mm.h> | |
32 | #include <linux/swap.h> | |
33 | #include <linux/stddef.h> | |
34 | #include <linux/vmalloc.h> | |
35 | #include <linux/init.h> | |
36 | #include <linux/delay.h> | |
14cf11af PM |
37 | #include <linux/highmem.h> |
38 | #include <linux/idr.h> | |
39 | #include <linux/nodemask.h> | |
40 | #include <linux/module.h> | |
c9cf5528 | 41 | #include <linux/poison.h> |
95f72d1e | 42 | #include <linux/memblock.h> |
a4fe3ce7 | 43 | #include <linux/hugetlb.h> |
5a0e3ad6 | 44 | #include <linux/slab.h> |
18569c1f PM |
45 | #include <linux/of_fdt.h> |
46 | #include <linux/libfdt.h> | |
b584c254 | 47 | #include <linux/memremap.h> |
14cf11af PM |
48 | |
49 | #include <asm/pgalloc.h> | |
50 | #include <asm/page.h> | |
51 | #include <asm/prom.h> | |
14cf11af PM |
52 | #include <asm/rtas.h> |
53 | #include <asm/io.h> | |
54 | #include <asm/mmu_context.h> | |
55 | #include <asm/pgtable.h> | |
56 | #include <asm/mmu.h> | |
7c0f6ba6 | 57 | #include <linux/uaccess.h> |
14cf11af PM |
58 | #include <asm/smp.h> |
59 | #include <asm/machdep.h> | |
60 | #include <asm/tlb.h> | |
61 | #include <asm/eeh.h> | |
62 | #include <asm/processor.h> | |
63 | #include <asm/mmzone.h> | |
64 | #include <asm/cputable.h> | |
14cf11af | 65 | #include <asm/sections.h> |
14cf11af | 66 | #include <asm/iommu.h> |
14cf11af | 67 | #include <asm/vdso.h> |
800fc3ee DG |
68 | |
69 | #include "mmu_decl.h" | |
14cf11af | 70 | |
37dd2bad | 71 | phys_addr_t memstart_addr = ~0; |
79c3095f | 72 | EXPORT_SYMBOL_GPL(memstart_addr); |
37dd2bad | 73 | phys_addr_t kernstart_addr; |
79c3095f | 74 | EXPORT_SYMBOL_GPL(kernstart_addr); |
d7917ba7 | 75 | |
d29eff7b AW |
76 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
77 | /* | |
78 | * Given an address within the vmemmap, determine the pfn of the page that | |
79 | * represents the start of the section it is within. Note that we have to | |
80 | * do this by hand as the proffered address may not be correctly aligned. | |
81 | * Subtraction of non-aligned pointers produces undefined results. | |
82 | */ | |
09de9ff8 | 83 | static unsigned long __meminit vmemmap_section_start(unsigned long page) |
d29eff7b AW |
84 | { |
85 | unsigned long offset = page - ((unsigned long)(vmemmap)); | |
86 | ||
87 | /* Return the pfn of the start of the section. */ | |
88 | return (offset / sizeof(struct page)) & PAGE_SECTION_MASK; | |
89 | } | |
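For illustration only (not part of the file), here is the arithmetic above with concrete, assumed numbers: 4K pages, 16MB sparsemem sections (PAGES_PER_SECTION == 4096, so PAGE_SECTION_MASK == ~0xfff) and a 64-byte struct page.

```c
/* Hypothetical walk-through of vmemmap_section_start() under the assumptions
 * stated above; the real values are configuration dependent. */
unsigned long page   = (unsigned long)vmemmap + 0x123456UL * 64; /* struct page for pfn 0x123456 */
unsigned long offset = page - (unsigned long)vmemmap;            /* 0x123456 * 64 */
unsigned long pfn    = offset / 64;                               /* 0x123456, sizeof(struct page) == 64 */
unsigned long first  = pfn & ~0xfffUL;                            /* 0x123000, first pfn of the section */
```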
90 | ||
91 | /* | |
92 | * Check if this vmemmap page is already initialised. If any section | |
93 | * which overlaps this vmemmap page is initialised then this page is | |
94 | * initialised already. | |
95 | */ | |
09de9ff8 | 96 | static int __meminit vmemmap_populated(unsigned long start, int page_size) |
d29eff7b AW |
97 | { |
98 | unsigned long end = start + page_size; | |
16a05bff | 99 | start = (unsigned long)(pfn_to_page(vmemmap_section_start(start))); |
d29eff7b AW |
100 | |
101 | for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page))) | |
16a05bff | 102 | if (pfn_valid(page_to_pfn((struct page *)start))) |
d29eff7b AW |
103 | return 1; |
104 | ||
105 | return 0; | |
106 | } | |
107 | ||
39e46751 AK |
108 | /* |
109 | * vmemmap virtual address space management does not have a traditional page | |
110 | * table to track which virtual struct pages are backed by physical mapping. | |
111 | * The virtual to physical mappings are tracked in a simple linked list | |
112 | * format. 'vmemmap_list' maintains the entire vmemmap physical mapping at | |
113 | * all times, whereas the 'next' list maintains the available | |
114 | * vmemmap_backing structures which have been deleted from the | |
115 | * 'vmemmap_list' during system runtime (memory hotplug remove | |
116 | * operation). The freed 'vmemmap_backing' structures are reused later when | |
117 | * new requests come in without allocating fresh memory. This pointer also | |
118 | * tracks the allocated 'vmemmap_backing' structures as we allocate one | |
119 | * a full page of memory at a time when we don't have any. | |
120 | */ | |
91eea67c | 121 | struct vmemmap_backing *vmemmap_list; |
bd8cb03d | 122 | static struct vmemmap_backing *next; |
39e46751 AK |
123 | |
124 | /* | |
125 | * The same pointer 'next' tracks individual chunks inside the allocated | |
126 | * full page during boot and later tracks the freed nodes at runtime. | |
127 | * This dual use is racy in principle, but the two phases never overlap | |
128 | * since they are separated by the boot process. It would only be a | |
129 | * problem if a memory hotplug operation somehow happened during boot. | |
130 | */ | |
bd8cb03d LZ |
131 | static int num_left; |
132 | static int num_freed; | |
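The vmemmap_backing structure itself is defined in the powerpc headers rather than in this file; judging from how it is used here, it looks roughly like the sketch below (field order and exact header are assumptions).

```c
/* Rough sketch of struct vmemmap_backing, inferred from its use in this file;
 * the authoritative definition lives in the powerpc pgalloc headers. */
struct vmemmap_backing {
	struct vmemmap_backing *list;	/* next entry in vmemmap_list, or next free chunk */
	unsigned long phys;		/* physical address backing this vmemmap range */
	unsigned long virt_addr;	/* virtual start of the vmemmap range */
};
```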
91eea67c MN |
133 | |
134 | static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node) | |
135 | { | |
bd8cb03d LZ |
136 | struct vmemmap_backing *vmem_back; |
137 | /* get from freed entries first */ | |
138 | if (num_freed) { | |
139 | num_freed--; | |
140 | vmem_back = next; | |
141 | next = next->list; | |
142 | ||
143 | return vmem_back; | |
144 | } | |
91eea67c MN |
145 | |
146 | /* allocate a page when required and hand out chunks */ | |
bd8cb03d | 147 | if (!num_left) { |
91eea67c MN |
148 | next = vmemmap_alloc_block(PAGE_SIZE, node); |
149 | if (unlikely(!next)) { | |
150 | WARN_ON(1); | |
151 | return NULL; | |
152 | } | |
153 | num_left = PAGE_SIZE / sizeof(struct vmemmap_backing); | |
154 | } | |
155 | ||
156 | num_left--; | |
157 | ||
158 | return next++; | |
159 | } | |
160 | ||
161 | static __meminit void vmemmap_list_populate(unsigned long phys, | |
162 | unsigned long start, | |
163 | int node) | |
164 | { | |
165 | struct vmemmap_backing *vmem_back; | |
166 | ||
167 | vmem_back = vmemmap_list_alloc(node); | |
168 | if (unlikely(!vmem_back)) { | |
169 | WARN_ON(1); | |
170 | return; | |
171 | } | |
172 | ||
173 | vmem_back->phys = phys; | |
174 | vmem_back->virt_addr = start; | |
175 | vmem_back->list = vmemmap_list; | |
176 | ||
177 | vmemmap_list = vmem_back; | |
178 | } | |
179 | ||
7b73d978 CH |
180 | int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, |
181 | struct vmem_altmap *altmap) | |
71b0bfe4 LZ |
182 | { |
183 | unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; | |
184 | ||
185 | /* Align to the page size of the linear mapping. */ | |
186 | start = _ALIGN_DOWN(start, page_size); | |
187 | ||
188 | pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node); | |
189 | ||
190 | for (; start < end; start += page_size) { | |
9ef34630 | 191 | void *p = NULL; |
1dace6c6 | 192 | int rc; |
71b0bfe4 LZ |
193 | |
194 | if (vmemmap_populated(start, page_size)) | |
195 | continue; | |
196 | ||
9ef34630 OH |
197 | /* |
198 | * Allocate from the altmap first if we have one. This may | |
199 | * fail due to alignment issues when using 16MB hugepages, so | |
200 | * fall back to system memory if the altmap allocation fails. | |
201 | */ | |
a8fc357b CH |
202 | if (altmap) |
203 | p = altmap_alloc_block_buf(page_size, altmap); | |
9ef34630 | 204 | if (!p) |
a8fc357b | 205 | p = vmemmap_alloc_block_buf(page_size, node); |
71b0bfe4 LZ |
206 | if (!p) |
207 | return -ENOMEM; | |
208 | ||
209 | vmemmap_list_populate(__pa(p), start, node); | |
210 | ||
211 | pr_debug(" * %016lx..%016lx allocated at %p\n", | |
212 | start, start + page_size, p); | |
213 | ||
1dace6c6 DG |
214 | rc = vmemmap_create_mapping(start, page_size, __pa(p)); |
215 | if (rc < 0) { | |
f2c2cbcc JP |
216 | pr_warn("%s: Unable to create vmemmap mapping: %d\n", |
217 | __func__, rc); | |
1dace6c6 DG |
218 | return -EFAULT; |
219 | } | |
71b0bfe4 LZ |
220 | } |
221 | ||
222 | return 0; | |
223 | } | |
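To give a sense of scale (illustrative; the actual vmemmap page size depends on mmu_vmemmap_psize):

```c
/*
 * Illustrative sizing, assuming the vmemmap is backed by 16MB pages
 * (MMU_PAGE_16M, shift 24) and sizeof(struct page) == 64: each loop
 * iteration above allocates and maps one 16MB block, i.e. room for
 * 16MB / 64B = 262144 struct pages, which with 64KB base pages is
 * enough to describe 16GB of memory.
 */
```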
224 | ||
225 | #ifdef CONFIG_MEMORY_HOTPLUG | |
bd8cb03d LZ |
226 | static unsigned long vmemmap_list_free(unsigned long start) |
227 | { | |
228 | struct vmemmap_backing *vmem_back, *vmem_back_prev; | |
229 | ||
230 | vmem_back_prev = vmem_back = vmemmap_list; | |
231 | ||
232 | /* look for it with prev pointer recorded */ | |
233 | for (; vmem_back; vmem_back = vmem_back->list) { | |
234 | if (vmem_back->virt_addr == start) | |
235 | break; | |
236 | vmem_back_prev = vmem_back; | |
237 | } | |
238 | ||
239 | if (unlikely(!vmem_back)) { | |
240 | WARN_ON(1); | |
241 | return 0; | |
242 | } | |
243 | ||
244 | /* remove it from vmemmap_list */ | |
245 | if (vmem_back == vmemmap_list) /* remove head */ | |
246 | vmemmap_list = vmem_back->list; | |
247 | else | |
248 | vmem_back_prev->list = vmem_back->list; | |
249 | ||
250 | /* make 'next' point to this freed entry */ | |
251 | vmem_back->list = next; | |
252 | next = vmem_back; | |
253 | num_freed++; | |
254 | ||
255 | return vmem_back->phys; | |
256 | } | |
257 | ||
24b6d416 CH |
258 | void __ref vmemmap_free(unsigned long start, unsigned long end, |
259 | struct vmem_altmap *altmap) | |
d29eff7b | 260 | { |
cec08e7a | 261 | unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; |
d7d9b612 | 262 | unsigned long page_order = get_order(page_size); |
9ef34630 OH |
263 | unsigned long alt_start = ~0, alt_end = ~0; |
264 | unsigned long base_pfn; | |
d29eff7b | 265 | |
d29eff7b | 266 | start = _ALIGN_DOWN(start, page_size); |
9ef34630 OH |
267 | if (altmap) { |
268 | alt_start = altmap->base_pfn; | |
269 | alt_end = altmap->base_pfn + altmap->reserve + | |
270 | altmap->free + altmap->alloc + altmap->align; | |
271 | } | |
d29eff7b | 272 | |
71b0bfe4 | 273 | pr_debug("vmemmap_free %lx...%lx\n", start, end); |
32a74949 | 274 | |
d29eff7b | 275 | for (; start < end; start += page_size) { |
d7d9b612 OH |
276 | unsigned long nr_pages, addr; |
277 | struct page *page; | |
d29eff7b | 278 | |
71b0bfe4 LZ |
279 | /* |
280 | * The section has already been marked as invalid, so if | |
281 | * vmemmap_populated() returns true, some other section still | |
282 | * lives in this page; skip it. | |
283 | */ | |
d29eff7b AW |
284 | if (vmemmap_populated(start, page_size)) |
285 | continue; | |
286 | ||
71b0bfe4 | 287 | addr = vmemmap_list_free(start); |
d7d9b612 OH |
288 | if (!addr) |
289 | continue; | |
290 | ||
291 | page = pfn_to_page(addr >> PAGE_SHIFT); | |
292 | nr_pages = 1 << page_order; | |
9ef34630 | 293 | base_pfn = PHYS_PFN(addr); |
d7d9b612 | 294 | |
9ef34630 | 295 | if (base_pfn >= alt_start && base_pfn < alt_end) { |
b584c254 OH |
296 | vmem_altmap_free(altmap, nr_pages); |
297 | } else if (PageReserved(page)) { | |
d7d9b612 OH |
298 | /* allocated from bootmem */ |
299 | if (page_size < PAGE_SIZE) { | |
300 | /* | |
301 | * this shouldn't happen, but if it is | |
302 | * the case, leave the memory there | |
303 | */ | |
304 | WARN_ON_ONCE(1); | |
305 | } else { | |
306 | while (nr_pages--) | |
307 | free_reserved_page(page++); | |
308 | } | |
309 | } else { | |
310 | free_pages((unsigned long)(__va(addr)), page_order); | |
71b0bfe4 | 311 | } |
d7d9b612 OH |
312 | |
313 | vmemmap_remove_mapping(start, page_size); | |
d29eff7b | 314 | } |
0197518c | 315 | } |
71b0bfe4 | 316 | #endif |
f7e3334a NF |
317 | void register_page_bootmem_memmap(unsigned long section_nr, |
318 | struct page *start_page, unsigned long size) | |
319 | { | |
320 | } | |
cd3db0c4 | 321 | |
7e7dc66a | 322 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ |
1a01dc87 | 323 | |
4e003747 | 324 | #ifdef CONFIG_PPC_BOOK3S_64 |
1fd6c022 ME |
325 | static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT); |
326 | ||
c610ec60 ME |
327 | static int __init parse_disable_radix(char *p) |
328 | { | |
1fd6c022 ME |
329 | bool val; |
330 | ||
cec4e9b2 | 331 | if (!p) |
1fd6c022 ME |
332 | val = true; |
333 | else if (kstrtobool(p, &val)) | |
334 | return -EINVAL; | |
335 | ||
336 | disable_radix = val; | |
337 | ||
c610ec60 ME |
338 | return 0; |
339 | } | |
340 | early_param("disable_radix", parse_disable_radix); | |
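As a usage note for the early parameter above (the values shown are standard kstrtobool spellings):

```c
/*
 * Kernel command line examples (illustrative):
 *   disable_radix      - no value: treated as true, radix is disabled
 *   disable_radix=1    - explicitly disable radix, fall back to hash
 *   disable_radix=0    - leave radix as a candidate; the hypervisor
 *                        check in early_check_vec5() may still disable it
 */
```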
341 | ||
18569c1f | 342 | /* |
cc3d2940 PM |
343 | * If we're running under a hypervisor, we need to check the contents of |
344 | * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do | |
345 | * radix. If not, we clear the radix feature bit so we fall back to hash. | |
18569c1f | 346 | */ |
7559952e | 347 | static void __init early_check_vec5(void) |
18569c1f PM |
348 | { |
349 | unsigned long root, chosen; | |
350 | int size; | |
351 | const u8 *vec5; | |
014d02cb | 352 | u8 mmu_supported; |
18569c1f PM |
353 | |
354 | root = of_get_flat_dt_root(); | |
355 | chosen = of_get_flat_dt_subnode_by_name(root, "chosen"); | |
014d02cb SJS |
356 | if (chosen == -FDT_ERR_NOTFOUND) { |
357 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; | |
18569c1f | 358 | return; |
014d02cb | 359 | } |
18569c1f | 360 | vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size); |
014d02cb SJS |
361 | if (!vec5) { |
362 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; | |
18569c1f | 363 | return; |
014d02cb SJS |
364 | } |
365 | if (size <= OV5_INDX(OV5_MMU_SUPPORT)) { | |
cc3d2940 | 366 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; |
014d02cb SJS |
367 | return; |
368 | } | |
369 | ||
370 | /* Check for supported configuration */ | |
371 | mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] & | |
372 | OV5_FEAT(OV5_MMU_SUPPORT); | |
373 | if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) { | |
374 | /* Hypervisor only supports radix - check enabled && GTSE */ | |
375 | if (!early_radix_enabled()) { | |
376 | pr_warn("WARNING: Ignoring cmdline option disable_radix\n"); | |
377 | } | |
378 | if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] & | |
379 | OV5_FEAT(OV5_RADIX_GTSE))) { | |
380 | pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n"); | |
381 | } | |
382 | /* Do radix anyway - the hypervisor said we had to */ | |
383 | cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX; | |
384 | } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) { | |
385 | /* Hypervisor only supports hash - disable radix */ | |
386 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; | |
387 | } | |
18569c1f PM |
388 | } |
389 | ||
1a01dc87 ME |
390 | void __init mmu_early_init_devtree(void) |
391 | { | |
c610ec60 | 392 | /* Disable radix mode based on kernel command line. */ |
fc36a903 | 393 | if (disable_radix) |
5a25b6f5 | 394 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; |
bacf9cf8 | 395 | |
18569c1f PM |
396 | /* |
397 | * Check /chosen/ibm,architecture-vec-5 if running as a guest. | |
398 | * When running bare-metal, we can use radix if we like | |
399 | * even though the ibm,architecture-vec-5 property created by | |
400 | * skiboot doesn't have the necessary bits set. | |
401 | */ | |
014d02cb | 402 | if (!(mfmsr() & MSR_HV)) |
18569c1f PM |
403 | early_check_vec5(); |
404 | ||
b8f1b4f8 | 405 | if (early_radix_enabled()) |
2537b09c ME |
406 | radix__early_init_devtree(); |
407 | else | |
bacf9cf8 | 408 | hash__early_init_devtree(); |
1a01dc87 | 409 | } |
4e003747 | 410 | #endif /* CONFIG_PPC_BOOK3S_64 */ |