/*
 * This file contains the routines setting up the linux page tables.
 *
 * Copyright (C) 2008 Michal Simek
 * Copyright (C) 2008 PetaLogix
 *
 * Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
 *
 * Derived from arch/ppc/mm/pgtable.c:
 *   -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 *
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/io.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#include <asm/fixmap.h>

unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif

static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD
	 */

	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
		p < __virt_to_phys((phys_addr_t)__bss_stop))) {
		pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
			(unsigned long)p, __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);

void iounmap(void __iomem *addr)
{
	if ((__force void *)addr > high_memory &&
			(unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);

137 | ||
138 | int map_page(unsigned long va, phys_addr_t pa, int flags) | |
139 | { | |
140 | pmd_t *pd; | |
141 | pte_t *pg; | |
142 | int err = -ENOMEM; | |
15902bf6 MS |
143 | /* Use upper 10 bits of VA to index the first level map */ |
144 | pd = pmd_offset(pgd_offset_k(va), va); | |
145 | /* Use middle 10 bits of VA to index the second-level map */ | |
146 | pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */ | |
147 | /* pg = pte_alloc_kernel(&init_mm, pd, va); */ | |
148 | ||
149 | if (pg != NULL) { | |
150 | err = 0; | |
151 | set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, | |
152 | __pgprot(flags))); | |
78ebfa88 | 153 | if (unlikely(mem_init_done)) |
6bd55f0b | 154 | _tlbie(va); |
15902bf6 | 155 | } |
15902bf6 MS |
156 | return err; |
157 | } | |
158 | ||
/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if ((char *) v < _stext || (char *) v >= _etext)
			f |= _PAGE_WRENABLE;
		else
			/* On the MicroBlaze, no user access
			   forces R/W kernel access */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise. The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pgd, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}
211 | ||
212 | /* Find physical address for this virtual address. Normally used by | |
213 | * I/O functions, but anyone can call it. | |
214 | */ | |
215 | unsigned long iopa(unsigned long addr) | |
216 | { | |
217 | unsigned long pa; | |
218 | ||
219 | pte_t *pte; | |
220 | struct mm_struct *mm; | |
221 | ||
222 | /* Allow mapping of user addresses (within the thread) | |
223 | * for DMA if necessary. | |
224 | */ | |
225 | if (addr < TASK_SIZE) | |
226 | mm = current->mm; | |
227 | else | |
228 | mm = &init_mm; | |
229 | ||
230 | pa = 0; | |
231 | if (get_pteptr(mm, addr, &pte)) | |
232 | pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK); | |
233 | ||
234 | return pa; | |
235 | } | |
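
/*
 * Allocate a zeroed page to be used as a kernel PTE page.  Once
 * mem_init() has run, the page allocator is used; before that, a page
 * is taken from the early allocator and cleared by hand.
 */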
__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
		unsigned long address)
{
	pte_t *pte;
	if (mem_init_done) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL |
					__GFP_REPEAT | __GFP_ZERO);
	} else {
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}
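
/*
 * Set up a fixmap entry: map the fixed virtual address reserved for
 * idx to the given physical address with the given protection flags.
 */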
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses)
		BUG();

	map_page(address, phys, pgprot_val(flags));
}