powerpc/mm: Flush radix process translations when setting MMU type
[linux-2.6-block.git] / arch / powerpc / mm / pgtable_64.c

/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>
#include <asm/powernv.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_BOOK3S_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pmd_cache_index;
EXPORT_SYMBOL(__pmd_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __kernel_virt_size;
EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
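
/*
 * Illustrative pairing of the two low-level helpers above (the names
 * "phys", "virt" and "size" are made up; real callers such as the PCI
 * code pick their own page-aligned values):
 *
 *	if (!__ioremap_at(phys, virt, size,
 *			  pgprot_val(pgprot_noncached(__pgprot(0)))))
 *		return -ENOMEM;
 *	...use the mapping...
 *	__iounmap_at(virt, size);
 */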

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the slab allocator is up,
	 * we use the vmalloc area between ioremap_bot and IOREMAP_END.
	 * Before that, we map using addresses going up from ioremap_bot.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}
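
/*
 * Worked example of the alignment handling above (illustrative
 * numbers): for addr = 0x3f0000042 and size = 0x10, paligned is
 * 0x3f0000000, the mapped size rounds up to one page, and the returned
 * cookie is the mapping address plus the 0x42 offset within the page,
 * so unaligned physical addresses "just work" for callers.
 */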

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
	flags &= ~_PAGE_USER;
	flags |= _PAGE_PRIVILEGED;

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

/*
 * Unmap an IO region and remove it from the vmalloc'd mappings.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);
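
/*
 * Typical (illustrative) driver usage of the exported interface: map a
 * device's MMIO register block non-cacheably, access it with the MMIO
 * accessors, then tear the mapping down.  "regs_phys", "REGS_SIZE" and
 * "CTRL_OFFSET" are hypothetical values a driver would get from its
 * bus probe code:
 *
 *	void __iomem *regs = ioremap(regs_phys, REGS_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_OFFSET);
 *	iounmap(regs);
 */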

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd and use the bits below
 * PTE_RPN_SHIFT for flags. For a PTE page, we have a PTE_FRAG_SIZE (4K)
 * aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find mm->context.pte_frag already set (we raced with
	 * another thread), we return the allocated page with a single
	 * fragment count and don't install it as the cache.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}
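
/*
 * Worked example of the fragment scheme above (the exact values are
 * set at boot via __pte_frag_nr/__pte_frag_size_shift; these numbers
 * assume the 4K PTE_FRAG_SIZE mentioned at pmd_page() and a 64K base
 * page): PTE_FRAG_NR is 64K / 4K = 16.  __alloc_for_cache() sets the
 * backing page's refcount to 16 when it installs the cache,
 * get_from_cache() then hands out the remaining fragments without
 * allocating, and each pte_fragment_free() drops the refcount until
 * the last free releases the page.
 */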

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}
#endif /* CONFIG_PPC_64K_PAGES */

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_unref_page(page);
	}
}
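
/*
 * pgtable_free_tlb() below stashes the page-table index size ("shift")
 * in the low bits of the table pointer before handing it to
 * tlb_remove_table(); the pointer is aligned well beyond
 * MAX_PGTABLE_INDEX_SIZE, so no address bits are lost.
 * __tlb_remove_table() masks the shift back out: shift == 0 marks a
 * PTE fragment page, which must go through pte_fragment_free() above
 * rather than kmem_cache_free().
 */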

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif

#ifdef CONFIG_PPC_BOOK3S_64
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * Update the partition table control register, which takes the
	 * physical base of the table and its size encoded as
	 * log2(size) - 12: here a 64K table.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}
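
/*
 * Note on the flush sequence used below: PPC_TLBIE_5(rb, rs, ric, prs, r)
 * emits a "tlbie" with RIC (invalidation class), PRS (process- vs.
 * partition-scoped) and R (radix vs. hash) encoded as immediates.
 * RIC=2 invalidates the TLB together with the page-walk cache and any
 * cached table entries for the LPID.  If the previous owner of the
 * partition ID was radix (PATB_HR), both partition-scoped (PRS=0) and
 * process-scoped (PRS=1) translations must be flushed.
 */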

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR) {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
	} else {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();
}

void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif