/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

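/*
 * Note: the CPOLICY_* values are ordered from weakest to strongest
 * caching, which is what lets build_mem_type_table() clamp the policy
 * with simple numeric comparisons.
 */
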
static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);
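
/*
 * top_pmd points at the pmd covering the top of the address space,
 * where the vector page sits when vectors are high (0xffff0000); it
 * is cached here so it need not be looked up on every access.
 */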
pmd_t *top_pmd;

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= 0,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= PTE_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= PTE_CACHEABLE,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on with the cache off.)
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	flush_cache_all();
	set_cr(cr_alignment);
}

static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}

__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);
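
/*
 * Example: booting with "cachepolicy=writethrough ecc=on" on the
 * kernel command line selects the writethrough policy above and
 * enables the ECC protection bit in the level 1 page table entries.
 */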

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}

__setup("noalign", noalign_setup);

#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

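/*
 * pgd slots below FIRST_KERNEL_PGD_NR map user space; everything from
 * FIRST_KERNEL_PGD_NR upwards is kernel mapping, and is copied rather
 * than allocated when a new pgd is created in get_pgd_slow() below.
 */
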
static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
	return pmd_offset(pgd, virt);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_off(pgd_offset_k(virt), virt);
}
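
/*
 * (On ARM the pmd level is folded into the pgd: pmd_offset() simply
 * returns its pgd argument cast to a pmd pointer, so these helpers
 * are cheap.)
 */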

/*
 * need to get a 16k page for level 1
 */
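/*
 * (The hardware level 1 table holds 4096 four-byte entries, i.e. 16kB,
 * hence the order-2 (four page) allocation below.)
 */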
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}

void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_page_state(nr_page_table_pages);
	pte_lock_deinit(pte);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}

/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_off_k(virt);

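	/*
	 * A Linux pgd/pmd slot covers 2MB and holds a pair of 1MB
	 * hardware section entries; bit 20 of the virtual address
	 * selects the second entry of the pair.
	 */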
	if (virt & (1 << 20))
		pmdp++;

	*pmdp = __pmd(phys | prot);
	flush_pmd_entry(pmdp);
}

/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
	int i;

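	/*
	 * A supersection maps 16MB.  The architecture requires the
	 * descriptor to be replicated in each of the 16 consecutive
	 * 1MB level 1 slots it covers, hence the loop below.
	 */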
	for (i = 0; i < 16; i += 1) {
		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);

		virt += (PGDIR_SIZE / 2);
	}
}

/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
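/*
 * (The hardware PTE format lacks the "young" and "dirty" state Linux
 * tracks, so a shadow Linux pte table is kept alongside the table the
 * MMU actually walks; that is why 2 * PTRS_PER_PTE entries are
 * allocated below.)
 */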
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp = pmd_off_k(virt);
	pte_t *ptep;

	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
	}
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}

struct mem_types {
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;
	unsigned int	domain;
};

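/*
 * For each memory type, prot_pte gives the Linux pte bits, prot_l1 the
 * level 1 table descriptor bits and prot_sect the section descriptor
 * bits; domain selects the ARM access domain (DACR field) the mapping
 * is placed in.
 */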
static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
				PMD_SECT_TEX(1),
		.domain    = DOMAIN_IO,
	}
};

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

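	/*
	 * On pre-ARMv6 CPUs bit 4 of a level 1 descriptor is a
	 * "should be one" bit, so it is set everywhere; on ARMv6 the
	 * same bit becomes XN (execute never) and is handled further
	 * down.
	 */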
	if (cpu_arch <= CPU_ARCH_ARMv5TEJ) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	cp = &cache_policies[cachepolicy];
	user_pgprot = cp->pte;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		/*
		 * Mark the device area as "shared device"
		 */
		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;

		/*
		 * User pages need to be mapped with the ASID
		 * (iow, non-global)
		 */
		user_pgprot |= L_PTE_ASID;
	}

	if (cpu_arch >= CPU_ARCH_ARMv5) {
		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
	} else {
		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
		protection_map[i] = __pgprot(v);
	}

	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | cp->pte);

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
}

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	pgprot_t prot_pte;
	unsigned long off = (u32)__pfn_to_phys(md->pfn);

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%016llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%016llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	domain	  = mem_types[md->type].domain;
	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		if (domain) {
			printk(KERN_ERR "MM: invalid domain in supersection "
			       "mapping for 0x%016llx at 0x%08lx\n",
			       __pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}
		if ((md->virtual | md->length | __pfn_to_phys(md->pfn))
		    & ~SUPERSECTION_MASK) {
			printk(KERN_ERR "MM: cannot create mapping for "
			       "0x%016llx at 0x%08lx invalid alignment\n",
			       __pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}

		/*
		 * Shift bits [35:32] of address into bits [23:20] of PMD
		 * (See ARMv6 spec).
		 */
		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
	}

	virt = md->virtual;
	off -= virt;
	length = md->length;

	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), md->virtual);
		return;
	}

	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt += PAGE_SIZE;
		length -= PAGE_SIZE;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
		/*
		 * Align to supersection boundary if !high pages.
		 * High pages have already been checked for proper
		 * alignment above and they will fail the SUPERSECTION_MASK
		 * check because of the way the address is encoded into
		 * offset.
		 */
		if (md->pfn <= 0x100000) {
			while ((virt & ~SUPERSECTION_MASK ||
			        (virt + off) & ~SUPERSECTION_MASK) &&
				length >= (PGDIR_SIZE / 2)) {
				alloc_init_section(virt, virt + off, prot_sect);

				virt += (PGDIR_SIZE / 2);
				length -= (PGDIR_SIZE / 2);
			}
		}

		while (length >= SUPERSECTION_SIZE) {
			alloc_init_supersection(virt, virt + off, prot_sect);

			virt += SUPERSECTION_SIZE;
			length -= SUPERSECTION_SIZE;
		}
	}

	/*
	 * A section mapping covers half a "pgdir" entry.
	 */
	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off.
 */
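/*
 * (Once the MMU is turned off the CPU fetches from physical addresses,
 * so the 1:1 section mappings written here keep the reboot code's next
 * instructions valid on either side of the switch.)
 */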
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ)
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}
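
/*
 * A minimal sketch (not from this file) of how machine support code
 * feeds iotable_init().  The device addresses below are made up;
 * real machines define such tables in their mach-* code and call
 * iotable_init() from their ->map_io hook:
 *
 *	static struct map_desc example_io_desc[] __initdata = {
 *		{
 *			.virtual = 0xf8000000,
 *			.pfn	 = __phys_to_pfn(0x40000000),
 *			.length	 = SZ_1M,
 *			.type	 = MT_DEVICE
 *		}
 *	};
 *
 *	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
 */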