/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;

/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pmd_cache_index;
EXPORT_SYMBOL(__pmd_cache_index);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __kernel_virt_size;
EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

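/*
 * Usage sketch (illustrative only, not part of this file): callers that
 * manage a virtual range by hand pair __ioremap_at() with __iounmap_at().
 * The physical base, the fixed virtual address and the 64K size below
 * are hypothetical.
 */
#if 0
static void example_fixed_mapping(phys_addr_t io_phys, void *fixed_ea)
{
	void __iomem *va;

	/* Bolt a 64K IO range at a caller-chosen, page-aligned address */
	va = __ioremap_at(io_phys, fixed_ea, 0x10000,
			  pgprot_val(pgprot_noncached(__pgprot(0))));
	if (!va)
		return;

	/* ... use the mapping ... */

	/* Tear down only part of it, e.g. the first four pages */
	__iounmap_at(fixed_ea, 4 * PAGE_SIZE);
}
#endif
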
void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running, we use it. Before that, we map using addresses going
	 * up from ioremap_bot; the vmalloc allocator then hands out
	 * addresses from ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

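/*
 * Usage sketch (illustrative, with a hypothetical MMIO base and register
 * layout): drivers normally go through ioremap()/iounmap() and the
 * readl()/writel() accessors rather than the __ioremap_* helpers.
 */
#if 0
static int example_probe(void)
{
	void __iomem *regs = ioremap(0xf0000000UL, 0x1000);

	if (!regs)
		return -ENOMEM;
	writel(1, regs + 0x4);	/* hypothetical enable register */
	(void)readl(regs);	/* read back to flush the posted write */
	iounmap(regs);
	return 0;
}
#endif
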
void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
	flags &= ~_PAGE_USER;
	flags |= _PAGE_PRIVILEGED;

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

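/*
 * Sketch (illustrative): ioremap_prot() lets the caller hand in raw pte
 * flags; the sanitizing above still strips _PAGE_EXEC and _PAGE_USER and
 * upgrades _PAGE_WRITE to _PAGE_WRITE | _PAGE_DIRTY. The flags below are
 * just an example of requesting a cacheable kernel mapping.
 */
#if 0
static void __iomem *example_cacheable_map(phys_addr_t pa, unsigned long size)
{
	return ioremap_prot(pa, size, pgprot_val(PAGE_KERNEL));
}
#endif
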
/*
 * Unmap an IO region and remove it from the vmalloc'd mappings.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For hugepages we have the pfn in the pmd; the low PTE_RPN_SHIFT bits are
 * used for flags. For a PTE page, we have a PTE_FRAG_SIZE (4K) aligned
 * virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pgtable_page set, we return
	 * the allocated page with single fragment
	 * count.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}
#endif /* CONFIG_PPC_64K_PAGES */

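/*
 * Illustration (not kernel code): with 64K pages, one page is carved into
 * PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes each. Walking a page in
 * PTE_FRAG_SIZE steps shows why the (pte_frag & ~PAGE_MASK) == 0 test in
 * get_from_cache() detects the last fragment: only the step past the final
 * fragment lands back on a page boundary.
 */
#if 0
static void example_frag_walk(void *page_va)
{
	int i;

	for (i = 0; i < PTE_FRAG_NR; i++) {
		void *frag = page_va + i * PTE_FRAG_SIZE;
		void *next = frag + PTE_FRAG_SIZE;

		/* 'next' is page aligned only after the last fragment */
		WARN_ON((((unsigned long)next & ~PAGE_MASK) == 0) !=
			(i == PTE_FRAG_NR - 1));
	}
}
#endif
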
void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_unref_page(page);
	}
}

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif /* CONFIG_SMP */

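/*
 * Illustration (not kernel code): page table pages are aligned well beyond
 * MAX_PGTABLE_INDEX_SIZE, so pgtable_free_tlb() can smuggle the index size
 * in the low bits of the table pointer and __tlb_remove_table() can split
 * it back out, as sketched here.
 */
#if 0
static void example_pack_unpack(void *table, int shift)
{
	unsigned long pgf = (unsigned long)table | shift;	/* pack */
	void *t = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);	/* the table again */
	unsigned long s = pgf & MAX_PGTABLE_INDEX_SIZE;		/* the shift again */

	WARN_ON(t != table || s != (unsigned long)shift);
}
#endif
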
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();
}

void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif