/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/lmb.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/io.h>

#include "mmu_decl.h"

unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */

#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
#define HAVE_BATS	1
#endif

#if defined(CONFIG_FSL_BOOKE)
#define HAVE_TLBCAM	1
#endif

extern char etext[], _stext[];

#ifdef HAVE_BATS
extern phys_addr_t v_mapped_by_bats(unsigned long va);
extern unsigned long p_mapped_by_bats(phys_addr_t pa);
void setbat(int index, unsigned long virt, phys_addr_t phys,
	    unsigned int size, int flags);

#else /* !HAVE_BATS */
#define v_mapped_by_bats(x)	(0UL)
#define p_mapped_by_bats(x)	(0UL)
#endif /* HAVE_BATS */

#ifdef HAVE_TLBCAM
extern unsigned int tlbcam_index;
extern phys_addr_t v_mapped_by_tlbcam(unsigned long va);
extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
#else /* !HAVE_TLBCAM */
#define v_mapped_by_tlbcam(x)	(0UL)
#define p_mapped_by_tlbcam(x)	(0UL)
#endif /* HAVE_TLBCAM */

#define PGDIR_ORDER	(32 + PGD_T_LOG2 - PGDIR_SHIFT)

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret;

	/* the pgdir takes a page or two with 4K pages, and a fraction of a page otherwise */
#ifndef CONFIG_PPC_4K_PAGES
	ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
#else
	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
			PGDIR_ORDER - PAGE_SHIFT);
#endif
	return ret;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifndef CONFIG_PPC_4K_PAGES
	kfree((void *)pgd);
#else
	free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
#endif
}

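/*
 * Worked example (not part of the original file; the constants are
 * configuration-dependent assumptions): with the classic two-level 4K-page
 * layout, PGDIR_SHIFT is 22 and a pgd_t is 4 bytes (PGD_T_LOG2 == 2), so
 * PGDIR_ORDER = 32 + 2 - 22 = 12.  The pgdir is then 1 << 12 = 4096 bytes,
 * exactly one 4K page, and __get_free_pages() above is called with order
 * PGDIR_ORDER - PAGE_SHIFT = 0.  With larger base pages the pgdir shrinks
 * to a fraction of a page, which is why that configuration uses kzalloc().
 */
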
__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;
	extern int mem_init_done;
	extern void *early_get_page(void);

	if (mem_init_done) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	} else {
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *ptepage;

#ifdef CONFIG_HIGHPTE
	gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT | __GFP_ZERO;
#else
	gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
#endif

	ptepage = alloc_pages(flags, 0);
	if (!ptepage)
		return NULL;
	pgtable_page_ctor(ptepage);
	return ptepage;
}

void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap_caller(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *
ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_flags);

void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
		 void *caller)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= PAGE_KERNEL;

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

#ifndef CONFIG_CRASH_DUMP
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (mem_init_done && (p < virt_to_phys(high_memory)) &&
	    !(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) {
		printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
		       (unsigned long long)p, __builtin_return_address(0));
		return NULL;
	}
#endif

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * BAT mapping.  If the whole area is mapped then we're done,
	 * otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */
	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
		goto out;

	if ((v = p_mapped_by_tlbcam(p)))
		goto out;

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area_caller(size, VM_IOREMAP, caller);
		if (area == 0)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v+i, p+i, flags);
	if (err) {
		if (mem_init_done)
			vunmap((void *)v);
		return NULL;
	}

out:
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
EXPORT_SYMBOL(__ioremap);
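
/*
 * Worked example of the alignment arithmetic above (illustrative, not part
 * of the original file): for __ioremap(0xfe003004, 0x20, ...) we get
 * p = 0xfe003000 and size = PAGE_ALIGN(0xfe003024) - p = 0x1000, so a single
 * 4K page is mapped, and the returned cookie is the new virtual address plus
 * the original sub-page offset 0x004.  iounmap() below masks that offset
 * back off before calling vunmap().
 */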

void iounmap(volatile void __iomem *addr)
{
	/*
	 * If mapped by BATs then there is nothing to do.
	 * Calling vfree() generates a benign warning.
	 */
	if (v_mapped_by_bats((unsigned long)addr)) return;

	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
		vunmap((void *) (PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);
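
/*
 * Illustrative usage sketch -- not part of the original file.  It shows the
 * typical driver-side pairing of ioremap()/iounmap() with the MMIO
 * accessors; the physical address and register offset are made-up values.
 */
#if 0
static u32 example_read_device_id(void)
{
	u32 __iomem *regs;
	u32 id;

	regs = ioremap(0xf1000000, 0x1000);	/* uncached + guarded mapping */
	if (!regs)
		return 0;
	id = in_be32(regs + 1);			/* hypothetical register at byte offset 4 */
	iounmap(regs);
	return id;
}
#endif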

int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va);
	if (pg != 0) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
		       flags);
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
						     __pgprot(flags)));
	}
	return err;
}

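/*
 * Worked breakdown of the "upper/middle 10 bits" comments in map_page()
 * above (illustrative, assuming the classic 4K-page two-level layout):
 *
 *   va[31:22]  10 bits  ->  index into the first-level map (pgd/pmd)
 *   va[21:12]  10 bits  ->  index into the second-level map (pte page)
 *   va[11:0]   12 bits  ->  byte offset within the page
 *
 * e.g. va = 0xc0123456 uses first-level index 0x300, second-level index
 * 0x123 and page offset 0x456.
 */
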
/*
 * Map in a chunk of physical memory, from offset up to top (both relative
 * to the start of RAM, i.e. memstart_addr).
 */
void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s, f;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = ((char *) v >= _stext && (char *) v < etext);
		f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
		map_page(v, p, f);
#ifdef CONFIG_PPC_STD_MMU_32
		if (ktext)
			hash_preload(&init_mm, v, 0, 0x300);
#endif
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

void __init mapin_ram(void)
{
	unsigned long s, top;

#ifndef CONFIG_WII
	top = total_lowmem;
	s = mmu_mapin_ram(top);
	__mapin_ram_chunk(s, top);
#else
	if (!wii_hole_size) {
		s = mmu_mapin_ram(total_lowmem);
		__mapin_ram_chunk(s, total_lowmem);
	} else {
		top = wii_hole_start;
		s = mmu_mapin_ram(top);
		__mapin_ram_chunk(s, top);

		top = lmb_end_of_DRAM();
		s = wii_mmu_mapin_mem2(top);
		__mapin_ram_chunk(s, top);
	}
#endif
}

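/*
 * Illustrative note (not part of the original file): mmu_mapin_ram() covers
 * as much of lowmem as it can with large block mappings (BATs or TLBCAM
 * entries) and returns the amount it handled; __mapin_ram_chunk() then maps
 * the remainder with ordinary pages.  For example, if block mappings cover
 * the first 16 MB, s comes back as 0x01000000 and only the range from 16 MB
 * up to total_lowmem is mapped page by page.
 */
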
/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise.  The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
	pgd_t	*pgd;
	pud_t	*pud;
	pmd_t	*pmd;
	pte_t	*pte;
	int	retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pud = pud_offset(pgd, addr & PAGE_MASK);
		if (pud && pud_present(*pud)) {
			pmd = pmd_offset(pud, addr & PAGE_MASK);
			if (pmd_present(*pmd)) {
				pte = pte_offset_map(pmd, addr & PAGE_MASK);
				if (pte) {
					retval = 1;
					*ptep = pte;
					if (pmdp)
						*pmdp = pmd;
					/* XXX caller needs to do pte_unmap, yuck */
				}
			}
		}
	}
	return retval;
}

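/*
 * Illustrative caller sketch -- not part of the original file.  It shows the
 * pte_unmap() pairing that the XXX comment above refers to, mirroring what
 * __change_page_attr() below does.
 */
#if 0
static int example_kernel_pte_present(unsigned long addr)
{
	pte_t *pte;
	int present = 0;

	if (get_pteptr(&init_mm, addr, &pte, NULL)) {
		present = pte_present(*pte) != 0;
		pte_unmap(pte);		/* undo the pte_offset_map() done inside */
	}
	return present;
}
#endif
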
#ifdef CONFIG_DEBUG_PAGEALLOC

static int __change_page_attr(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	pmd_t *kpmd;
	unsigned long address;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	if (v_mapped_by_bats(address) || v_mapped_by_tlbcam(address))
		return 0;
	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
	wmb();
#ifdef CONFIG_PPC_STD_MMU
	flush_hash_pages(0, address, pmd_val(*kpmd), 1);
#else
	flush_tlb_page(NULL, address);
#endif
	pte_unmap(kpte);

	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * THIS CONFLICTS WITH BAT MAPPINGS, DEBUG USE ONLY
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}
	local_irq_restore(flags);
	return err;
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
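
/*
 * Illustrative note (not part of the original file): with
 * CONFIG_DEBUG_PAGEALLOC the page allocator calls kernel_map_pages(page, n, 0)
 * as pages are freed, which installs __pgprot(0) -- a non-present PTE -- in
 * the linear mapping so that stray accesses to freed memory fault
 * immediately; kernel_map_pages(page, n, 1) restores PAGE_KERNEL when the
 * pages are allocated again.
 */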

static int fixmaps;

void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	map_page(address, phys, pgprot_val(flags));
	fixmaps++;
}
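
/*
 * Illustrative usage sketch -- not part of the original file.  FIX_EXAMPLE
 * and the PAGE_KERNEL_NCG protection are assumptions made for the example;
 * real indices live in asm/fixmap.h and the resulting virtual address is
 * normally obtained with fix_to_virt().
 */
#if 0
void __init example_fixmap_device_page(phys_addr_t phys)
{
	/* FIX_EXAMPLE is a hypothetical enum fixed_addresses value */
	__set_fixmap(FIX_EXAMPLE, phys & PAGE_MASK, PAGE_KERNEL_NCG);
	/* the page is now accessible at fix_to_virt(FIX_EXAMPLE) */
}
#endif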

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}