/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

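/*
 * Walk the kernel page tables (pgd -> pud -> pmd) and return the PTE
 * mapping the given kernel virtual address. Only used below to locate
 * the PKMAP and kmap fixmap PTEs set up in paging_init().
 */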
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

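/*
 * Choose the page protection for a userspace mapping of physical
 * memory (e.g. via /dev/mem): RAM keeps the protection the caller
 * asked for, while anything else (typically MMIO) is made non-cached.
 * A platform can override this policy through the ppc_md hook.
 */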
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

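/*
 * The __weak stubs above are overridden by MMU-specific code (e.g.
 * hash or radix on book3s64); returning -ENODEV makes hotplug fail
 * cleanly on platforms without such support.
 *
 * arch_add_memory(): grow the hash page table if necessary, map the
 * new range into the kernel linear mapping, then hand the pages to
 * the core mm via __add_pages().
 */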
int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
			      bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	flush_inval_dcache_range(start, start + size);

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
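/*
 * Tear-down mirrors arch_add_memory(): pull the pages out of the core
 * mm first, then remove the linear mapping for the range and give the
 * hash page table a chance to shrink again.
 */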
int __meminit arch_remove_memory(int nid, u64 start, u64 size,
				 struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_inval_dcache_range(start, start + size);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
		pr_warn("Hash collision while resizing HPT\n");

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

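/*
 * Hibernation support: a pfn range lying between two memblock regions
 * has no backing RAM, so tell the suspend core not to save or restore
 * it.
 */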
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31 bits on all platforms and ZONE_NORMAL to
 * be everything else. GFP_DMA32 page allocations automatically fall
 * back to ZONE_DMA.
 *
 * By using 31 bits unconditionally, we can exploit ARCH_ZONE_DMA_BITS
 * to inform the generic DMA mapping code. 32-bit only devices (if not
 * handled by an IOMMU anyway) will take a first dip into ZONE_NORMAL
 * and get otherwise served by ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

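	/*
	 * On 32-bit, the zero mappings below appear to exist only to
	 * pre-allocate the page-table pages covering the fixmap and
	 * PKMAP windows; the real PTEs are written later by
	 * kmap_atomic()/kmap(), hence the "XXX gross" remarks.
	 */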
#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	memblock_free_all();

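	/*
	 * memblock_free_all() only releases lowmem; highmem pages are
	 * handed to the buddy allocator one pfn at a time here,
	 * skipping anything memblock has reserved.
	 */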
#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the CPU
	 * bring-up functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean. We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

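/*
 * Write a page's data back from the d-cache and invalidate the
 * matching i-cache lines so that newly written code is visible to
 * instruction fetch, typically before the page is mapped executable
 * into user space.
 */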
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 *   - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 *   - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_BOOK3S
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long trap;
	bool is_exec;

	if (radix_enabled()) {
		prefetch((void *)address);
		return;
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */

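	/*
	 * TRAP() gives the exception vector we entered through: 0x300
	 * is a data storage interrupt, 0x400 an instruction storage
	 * interrupt; anything else means we did not get here via a
	 * page fault, so there is nothing to preload.
	 */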
	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:
		is_exec = false;
		break;
	case 0x400:
		is_exec = true;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, is_exec, trap);
#endif /* CONFIG_PPC_BOOK3S */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it,
 * for the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is
 * removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);