arch/powerpc/mm/init_64.c

/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *	Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

/* kmem_cache constructors: newly allocated page tables start out zeroed */
static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	memset(addr, 0, PMD_TABLE_SIZE * 2);
#else
	memset(addr, 0, PMD_TABLE_SIZE);
#endif
}
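
/*
 * Editorial note: under CONFIG_TRANSPARENT_HUGEPAGE the PMD table is
 * cleared (and allocated, via PMD_CACHE_INDEX) at twice its nominal
 * size. The second half is believed to be used by the hash-MMU THP
 * code to track hash-PTE slot state for huge pages; treat that reason
 * as an assumption inferred from the config, not a statement made by
 * this file.
 */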

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor);
	pgtable_cache[shift - 1] = new;
	pr_debug("Allocated pgtable cache for order %d\n", shift);
}
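
/*
 * Illustrative note (editorial, not from the original source): the
 * minalign requirement above guarantees a table pointer has enough
 * zero low bits to carry a small index/shift value alongside it, e.g.:
 *
 *	packed = (unsigned long)table | index_size;
 *	table  = (void *)(packed & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
 *	index  = packed & MAX_PGTABLE_INDEX_SIZE;
 *
 * This only works because MAX_PGTABLE_INDEX_SIZE + 1 is a power of
 * two, which the BUG_ON(!is_power_of_2(minalign)) check enforces.
 */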

void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
	pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
		panic("Couldn't allocate pgtable caches");
	/* In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index.  Verify that the
	 * initialization above has also created a PUD cache.  This
	 * will need re-examination if we add new possibilities for
	 * the pagetable layout. */
	BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}
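
/*
 * Worked example (editorial, illustrative numbers only): with a 16M
 * mapping page and a 64-byte struct page, one vmemmap block describes
 * 16M / 64 = 256Ki pfns, so a single block can span many sparsemem
 * sections.  That is why vmemmap_populated() must walk every section
 * overlapping the block rather than testing just its first pfn.
 */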

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things. Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	int mapped = htab_bolt_mapping(start, start + page_size, phys,
				       pgprot_val(PAGE_KERNEL),
				       mmu_vmemmap_psize,
				       mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}
#endif /* CONFIG_PPC_BOOK3E */

struct vmemmap_backing *vmemmap_list;

static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
	static struct vmemmap_backing *next;
	static int num_left;

	/* allocate a page when required and hand out chunks */
	if (!next || !num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}
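
/*
 * Design note (editorial): vmemmap_list_alloc() is a simple bump
 * allocator.  It carves struct vmemmap_backing entries out of a whole
 * page and never frees them, which is sufficient here because the
 * vmemmap list only ever grows (vmemmap_free() below is a no-op).
 */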

static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		vmemmap_create_mapping(start, page_size, __pa(p));
	}

	return 0;
}
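
/*
 * Usage note (editorial): vmemmap_populate() is the arch hook called
 * by the generic sparsemem code when CONFIG_SPARSEMEM_VMEMMAP is set.
 * It must back the [start, end) slice of the virtual memmap with real
 * pages before any struct page in that range is touched.
 */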

/* No-op: freeing vmemmap pages is not (yet) supported on powerpc. */
void vmemmap_free(unsigned long start, unsigned long end)
{
}

#endif /* CONFIG_SPARSEMEM_VMEMMAP */