powerpc/mm: Add support for SPARSEMEM_VMEMMAP on 64-bit Book3E
[linux-2.6-block.git] / arch / powerpc / mm / init_64.c
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *	Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;

/* Poison, unreserve and free each page of __init code and data,
 * returning it to the page allocator. */
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %luk freed\n",
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n",
		       (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

#ifdef CONFIG_PROC_KCORE
static struct kcore_list kcore_vmem;

/* Register each lmb memory region, plus the vmalloc area, with /proc/kcore. */
static int __init setup_kcore(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base, size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("%s: kmalloc failed\n", __func__);

		kclist_add(kcore_mem, __va(base), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
#endif

static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, PMD_TABLE_SIZE);
}

static const unsigned int pgtable_cache_size[2] = {
	PGD_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
#ifdef CONFIG_PPC_64K_PAGES
	"pgd_cache", "pmd_cache",
#else
	"pgd_cache", "pud_pmd_cache",
#endif /* CONFIG_PPC_64K_PAGES */
};

#ifdef CONFIG_HUGETLB_PAGE
/* Hugepages need an extra cache per hugepage size, initialized in
 * hugetlbpage.c.  We can't put them into the tables above, because
 * HPAGE_SHIFT is not a compile-time constant. */
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+MMU_PAGE_COUNT];
#else
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif

void pgtable_cache_init(void)
{
	pgtable_cache[0] = kmem_cache_create(pgtable_cache_name[0],
					     PGD_TABLE_SIZE, PGD_TABLE_SIZE,
					     SLAB_PANIC, pgd_ctor);
	pgtable_cache[1] = kmem_cache_create(pgtable_cache_name[1],
					     PMD_TABLE_SIZE, PMD_TABLE_SIZE,
					     SLAB_PANIC, pmd_ctor);
}
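
/*
 * Not part of the original file: a hedged sketch of how these caches are
 * consumed.  The real allocators live in asm/pgalloc-64.h; the assumed
 * shape of a PGD allocation is roughly
 *
 *	pgd_t *pgd = kmem_cache_alloc(pgtable_cache[0], GFP_KERNEL);
 *
 * where index 0 matches the PGD slot filled in by pgtable_cache_init()
 * above.  The ctors guarantee each object starts out zeroed, and slab
 * convention requires objects to be freed back in that same state.
 */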

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
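
/*
 * Worked example (illustrative values, not from this file): with 4K pages
 * and 16M sections, PAGES_PER_SECTION is 4096.  For an address landing
 * inside &vmemmap[5000], offset / sizeof(struct page) is 5000, and
 * masking with PAGE_SECTION_MASK (~4095) rounds it down to 4096, the
 * first pfn of the section that pfn 5000 belongs to.
 */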

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}
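
/*
 * Illustrative arithmetic (assumed sizes): with 4K pages, 16M sections
 * and a 64-byte struct page, the loop above advances by
 * 4096 * 64 = 256K per iteration, i.e. one section's worth of vmemmap,
 * so probing a 16M vmemmap block costs at most 64 pfn_valid() checks.
 */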

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things.  Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear.
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	int mapped = htab_bolt_mapping(start, start + page_size, phys,
				       PAGE_KERNEL, mmu_vmemmap_psize,
				       mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}
#endif /* CONFIG_PPC_BOOK3E */
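
/*
 * Illustrative only (the size and its encoding are assumed): if the
 * Book3E vmemmap page size is 16M, the loop above makes 16M / 4K = 4096
 * calls to map_kernel_page(), each installing a PTE carrying the 16M
 * size encoding in bits 8..11 and the same 16M-aligned physical address.
 * The MMU derives the offset within the large page from the low bits of
 * the virtual address, which is why phys is never incremented.
 */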

int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
		 start_page, nr_pages, node);
	pr_debug(" -> map %lx..%lx\n", start, end);

	for (; start < end; start += page_size) {
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		pr_debug(" * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		vmemmap_create_mapping(start, page_size, __pa(p));
	}

	return 0;
}
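
/*
 * Back-of-the-envelope (assumed sizes, not from this file): with a
 * 64-byte struct page and a 16M mapping size, one allocated block holds
 * 16M / 64 = 256K struct pages, so with 4K pages a single
 * vmemmap_create_mapping() call covers the vmemmap for 1G of memory.
 */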
#endif /* CONFIG_SPARSEMEM_VMEMMAP */