Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/arm/mm/flush.c | |
3 | * | |
4 | * Copyright (C) 1995-2002 Russell King | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | */ | |
10 | #include <linux/module.h> | |
11 | #include <linux/mm.h> | |
12 | #include <linux/pagemap.h> | |
13 | ||
14 | #include <asm/cacheflush.h> | |
46097c7d | 15 | #include <asm/cachetype.h> |
1da177e4 | 16 | #include <asm/system.h> |
8d802d28 RK |
17 | #include <asm/tlbflush.h> |
18 | ||
1b2e2b73 RK |
19 | #include "mm.h" |
20 | ||
/*
 * ARM erratum 411920 workaround: the whole I-cache must be invalidated
 * via this out-of-line helper instead of a single "mcr c7, c5, 0".
 */
#ifdef CONFIG_ARM_ERRATA_411920
extern void v6_icache_inval_all(void);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

/*
 * Base of the temporary kernel window used by flush_pfn_alias() to map a
 * page at an address congruent with its userspace colour.
 */
#define ALIAS_FLUSH_START	0xffff4000
28 | ||
/*
 * Flush the cache lines of the page at @pfn as seen at the userspace
 * colour of @vaddr: map the page into the alias window at an address
 * congruent with @vaddr, then clean+invalidate that mapping.
 */
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	/* Pick the window slot with the same cache colour as vaddr. */
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	/* Install the temporary mapping and kill any stale TLB entry. */
	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

	/*
	 * mcrr c14: clean+invalidate D-cache over [to, to+PAGE_SIZE);
	 * c7,c10,4: drain write buffer;
	 * c7,c5,0:  invalidate I-cache — skipped on erratum 411920 parts,
	 * which use v6_icache_inval_all() below instead.
	 */
	asm(	"mcrr p15, 0, %1, %0, c14\n"
	"	mcr p15, 0, %2, c7, c10, 4\n"
#ifndef CONFIG_ARM_ERRATA_411920
	"	mcr p15, 0, %2, c7, c5, 0\n"
#endif
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
#ifdef CONFIG_ARM_ERRATA_411920
	v6_icache_inval_all();
#endif
}
49 | ||
/*
 * Flush all user cache state for @mm.
 *
 * VIVT: flush the whole user cache view, but only if this CPU is
 * currently running the mm.  Aliasing VIPT: clean+invalidate the entire
 * D-cache, drain the write buffer, and invalidate the I-cache (via
 * v6_icache_inval_all() on erratum 411920 parts).  Non-aliasing VIPT
 * needs nothing.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			__cpuc_flush_user_all();
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * c7,c14,0: clean+invalidate entire D-cache;
		 * c7,c10,4: drain write buffer;
		 * c7,c5,0:  invalidate I-cache (erratum 411920 excepted).
		 */
		asm(	"mcr p15, 0, %0, c7, c14, 0\n"
		"	mcr p15, 0, %0, c7, c10, 4\n"
#ifndef CONFIG_ARM_ERRATA_411920
		"	mcr p15, 0, %0, c7, c5, 0\n"
#endif
		    :
		    : "r" (0)
		    : "cc");
#ifdef CONFIG_ARM_ERRATA_411920
		v6_icache_inval_all();
#endif
	}
}
72 | ||
/*
 * Flush user cache state for the address range [start, end) in @vma.
 *
 * VIVT: flush the user range (page-aligned), but only if this CPU is
 * currently running the owning mm.  Aliasing VIPT: a ranged flush would
 * still hit all colours, so simply clean+invalidate the whole D-cache
 * and invalidate the I-cache, as in flush_cache_mm().
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
						vma->vm_flags);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * c7,c14,0: clean+invalidate entire D-cache;
		 * c7,c10,4: drain write buffer;
		 * c7,c5,0:  invalidate I-cache (erratum 411920 excepted).
		 */
		asm(	"mcr p15, 0, %0, c7, c14, 0\n"
		"	mcr p15, 0, %0, c7, c10, 4\n"
#ifndef CONFIG_ARM_ERRATA_411920
		"	mcr p15, 0, %0, c7, c5, 0\n"
#endif
		    :
		    : "r" (0)
		    : "cc");
#ifdef CONFIG_ARM_ERRATA_411920
		v6_icache_inval_all();
#endif
	}
}
96 | ||
97 | void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) | |
98 | { | |
99 | if (cache_is_vivt()) { | |
100 | if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { | |
101 | unsigned long addr = user_addr & PAGE_MASK; | |
102 | __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); | |
103 | } | |
104 | return; | |
105 | } | |
106 | ||
107 | if (cache_is_vipt_aliasing()) | |
108 | flush_pfn_alias(pfn, user_addr); | |
109 | } | |
a188ad2b GD |
110 | |
111 | void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | |
112 | unsigned long uaddr, void *kaddr, | |
113 | unsigned long len, int write) | |
114 | { | |
115 | if (cache_is_vivt()) { | |
116 | if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { | |
117 | unsigned long addr = (unsigned long)kaddr; | |
118 | __cpuc_coherent_kern_range(addr, addr + len); | |
119 | } | |
120 | return; | |
121 | } | |
122 | ||
123 | if (cache_is_vipt_aliasing()) { | |
124 | flush_pfn_alias(page_to_pfn(page), uaddr); | |
125 | return; | |
126 | } | |
127 | ||
128 | /* VIPT non-aliasing cache */ | |
129 | if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) && | |
a71ebdfa | 130 | vma->vm_flags & VM_EXEC) { |
a188ad2b GD |
131 | unsigned long addr = (unsigned long)kaddr; |
132 | /* only flushing the kernel mapping on non-aliasing VIPT */ | |
133 | __cpuc_coherent_kern_range(addr, addr + len); | |
134 | } | |
135 | } | |
#else
/* Non-VIPT caches have no alias window; flushing an alias is a no-op. */
#define flush_pfn_alias(pfn,vaddr) do { } while (0)
#endif
1da177e4 | 139 | |
8830f04a | 140 | void __flush_dcache_page(struct address_space *mapping, struct page *page) |
1da177e4 | 141 | { |
1da177e4 LT |
142 | /* |
143 | * Writeback any data associated with the kernel mapping of this | |
144 | * page. This ensures that data in the physical page is mutually | |
145 | * coherent with the kernels mapping. | |
146 | */ | |
147 | __cpuc_flush_dcache_page(page_address(page)); | |
148 | ||
149 | /* | |
8830f04a RK |
150 | * If this is a page cache page, and we have an aliasing VIPT cache, |
151 | * we only need to do one flush - which would be at the relevant | |
8d802d28 RK |
152 | * userspace colour, which is congruent with page->index. |
153 | */ | |
8830f04a RK |
154 | if (mapping && cache_is_vipt_aliasing()) |
155 | flush_pfn_alias(page_to_pfn(page), | |
156 | page->index << PAGE_CACHE_SHIFT); | |
157 | } | |
158 | ||
159 | static void __flush_dcache_aliases(struct address_space *mapping, struct page *page) | |
160 | { | |
161 | struct mm_struct *mm = current->active_mm; | |
162 | struct vm_area_struct *mpnt; | |
163 | struct prio_tree_iter iter; | |
164 | pgoff_t pgoff; | |
8d802d28 | 165 | |
1da177e4 LT |
166 | /* |
167 | * There are possible user space mappings of this page: | |
168 | * - VIVT cache: we need to also write back and invalidate all user | |
169 | * data in the current VM view associated with this page. | |
170 | * - aliasing VIPT: we only need to find one mapping of this page. | |
171 | */ | |
172 | pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); | |
173 | ||
174 | flush_dcache_mmap_lock(mapping); | |
175 | vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) { | |
176 | unsigned long offset; | |
177 | ||
178 | /* | |
179 | * If this VMA is not in our MM, we can ignore it. | |
180 | */ | |
181 | if (mpnt->vm_mm != mm) | |
182 | continue; | |
183 | if (!(mpnt->vm_flags & VM_MAYSHARE)) | |
184 | continue; | |
185 | offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; | |
186 | flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page)); | |
1da177e4 LT |
187 | } |
188 | flush_dcache_mmap_unlock(mapping); | |
189 | } | |
190 | ||
/*
 * Ensure cache coherency between the kernel mapping and userspace
 * mappings of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *    current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

#ifndef CONFIG_SMP
	/*
	 * Lazy path: mark the page dirty and defer the flush.  Highmem
	 * pages are excluded — presumably because page_address() in
	 * __flush_dcache_page() needs a kernel mapping; TODO confirm.
	 */
	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		__flush_dcache_page(mapping, page);
		/* VIVT must chase every user alias; otherwise a mapped
		 * page still needs the I-cache invalidated. */
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
	}
}
EXPORT_SYMBOL(flush_dcache_page);
6020dff0 RK |
/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* A VIPT non-aliasing cache needs no work at all. */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate the userspace mapping: directly on
	 * VIVT, or through a congruent kernel alias on aliasing VIPT.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt())
		flush_cache_page(vma, vmaddr, pfn);
	else
		flush_pfn_alias(pfn, vmaddr);

	/*
	 * Invalidate the kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_page(page_address(page));
}