/*
 * linux/arch/arm/mm/fault-armv.c
 *
 * Copyright (C) 1995  Linus Torvalds
 * Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;

/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
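/*
 * Make the PTE at 'address' in 'vma' use the shared memory type: if it
 * is present and still cacheable, flush the page from the caches, then
 * rewrite its memory-type bits to shared_pte_mask.  Returns non-zero
 * if the PTE was present.
 */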
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e., is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		unsigned long pfn = pte_pfn(entry);
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

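/*
 * Walk the page tables down to the PTE covering 'address' and apply
 * do_adjust_pte() to it under the PTE lock.  Returns 0 if no entry is
 * mapped at this address.
 */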
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pmd = pmd_offset(pgd, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map_nested(pmd, address);
	spin_lock(ptl);

	ret = do_adjust_pte(vma, address, pte);

	spin_unlock(ptl);
	pte_unmap_nested(pte);

	return ret;
}

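/*
 * A page at 'pfn' has just been mapped at 'addr' in 'vma'.  Find any
 * other shared (VM_MAYSHARE) mappings of the same page offset in this
 * mm and mark their PTEs uncacheable.  If any alias was present, the
 * new mapping is made uncacheable as well; otherwise a cache flush of
 * the faulting page is sufficient.
 */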
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		adjust_pte(vma, addr);
	else
		flush_cache_page(vma, addr, pfn);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
#ifndef CONFIG_SMP
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_dcache_page(mapping, page);
#endif
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.  The caller passes two virtual aliases of the same
 * physical word: we write through one, overwrite through the
 * other, then read back through the first; if the first write
 * is still visible, the two mappings are not coherent.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

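/*
 * Map one physical page at two virtual addresses, both with the
 * bufferable memory type, and run check_writebuffer() on the pair.
 * If the aliases turn out not to be coherent (or the test cannot be
 * run), degrade shared mappings from bufferable to fully uncached.
 */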
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory\n";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page\n";
	}

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		printk("ok\n");
	}
}