powerpc/mm/hash64: Map all the kernel regions in the same 0xc range
arch/powerpc/mm/pgtable_64.c
/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
unsigned long __kernel_io_end;
EXPORT_SYMBOL(__kernel_io_end);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif

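/*
 * Editor's note (a sketch, not in the original file): the geometry
 * variables above are filled in by the MMU-specific early init code,
 * before the generic page table macros are ever used.  Assuming the
 * usual hash/radix setup paths, that amounts to something like:
 *
 *	__pte_index_size = H_PTE_INDEX_SIZE;		(hash)
 *	__pte_index_size = RADIX_PTE_INDEX_SIZE;	(radix)
 *
 * so that PTE_INDEX_SIZE and friends resolve to the geometry of
 * whichever MMU was selected at boot.
 */
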
/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
{
	unsigned long i;

	/* We don't support the 4K PFN hack with ioremap */
	if (pgprot_val(prot) & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
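
/*
 * Example (editor's sketch, not part of the original file): the *_at
 * variants are for callers that manage the virtual address range
 * themselves, e.g. mapping a host bridge's IO window at a fixed kernel
 * virtual address.  The helper names below are hypothetical.
 */
static int __maybe_unused example_map_io_window(phys_addr_t io_phys,
						void *io_virt,
						unsigned long size)
{
	/* all three arguments must be page aligned */
	if (!__ioremap_at(io_phys, io_virt, size, pgprot_noncached(PAGE_KERNEL)))
		return -ENOMEM;
	return 0;
}

static void __maybe_unused example_unmap_io_window(void *io_virt,
						   unsigned long size)
{
	__iounmap_at(io_virt, size);
}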

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				pgprot_t prot, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.  Once the slab allocator is up
	 * (and with it the vmalloc area), we let __get_vm_area_caller()
	 * pick an address between ioremap_bot and IOREMAP_END.  Before
	 * that, we hand out bolted addresses going up from ioremap_bot.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, prot);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}
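
/*
 * Worked example (editor's note, hypothetical numbers): with 4K pages,
 * a request for addr = 0x3fe01004 and size = 8 gives
 * paligned = 0x3fe01000 and a page-aligned size of 0x1000.  The cookie
 * handed back to the caller is the mapped virtual address plus the
 * sub-page offset (addr & ~PAGE_MASK = 0x4), so unaligned physical
 * addresses work transparently.
 */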

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}

void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_cached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
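
/*
 * Editor's note: the three helpers above differ only in the cache
 * attributes they request: ioremap() is non-cacheable and guarded (the
 * safe default for device registers), ioremap_wc() is write-combining
 * (useful for framebuffers and similar), and ioremap_coherent() asks
 * for a cacheable, coherent mapping.  A hypothetical framebuffer driver
 * might do:
 *
 *	info->screen_base = ioremap_wc(fb_phys, fb_len);
 */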

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	pte_t pte = __pte(flags);
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (pte_write(pte))
		pte = pte_mkdirty(pte);

	/* we don't want to let _PAGE_EXEC leak out */
	pte = pte_exprotect(pte);
	/*
	 * Force kernel mapping.
	 */
	pte = pte_mkprivileged(pte);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, pte_pgprot(pte), caller);
	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}
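
/*
 * Example (editor's sketch): ioremap_prot() is for callers that need to
 * pass raw pte flags when none of the canned variants fit, e.g. to build
 * a cacheable mapping from PAGE_KERNEL by hand (names follow the helpers
 * used earlier in this file):
 *
 *	regs = ioremap_prot(addr, size,
 *			    pgprot_val(pgprot_cached(PAGE_KERNEL)));
 */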


/*
 * Unmap an IO region and remove it from the vmalloc'd mappings.
 * Access to IO memory should be serialized by driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}
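
/*
 * Example (editor's sketch, not part of the original file): typical
 * driver-side use of ioremap()/iounmap() together with the MMIO
 * accessors.  The device base address and register offset are
 * hypothetical.
 */
static u32 __maybe_unused example_read_device_id(phys_addr_t mmio_base)
{
	void __iomem *regs;
	u32 id;

	regs = ioremap(mmio_base, PAGE_SIZE);	/* non-cacheable, guarded */
	if (!regs)
		return 0;

	id = in_be32(regs + 0x10);	/* hypothetical big-endian ID register */
	iounmap(regs);

	return id;
}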

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage, the pmd contains the pfn and the low PTE_RPN_SHIFT
 * bits are used for flags.  For a PTE page, we have a PTE_FRAG_SIZE
 * (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_large(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();
}

void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif
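
/*
 * Editor's note: mark_rodata_ro() is invoked by the generic init code once
 * boot-time initialisation is complete, and mark_initmem_nx() when the init
 * sections are freed; the radix__/hash__ variants then adjust the linear
 * mapping's permissions using whichever page table format is live.
 */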