// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

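/*
 * Note: with CONFIG_ARM_HEAVY_MB the heavyweight mb()/wmb() barriers end up
 * calling arm_heavy_mb() below, so that in addition to the CPU barrier the
 * outer (L2) cache is synchronised and an optional SoC-specific barrier
 * hook (soc_mb) can run.
 */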
#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

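/*
 * Note: flush_pfn_alias() works by mapping the physical page at a temporary
 * kernel address whose cache colour matches the user address
 * (FLUSH_ALIAS_START + CACHE_COLOUR(vaddr)), so that maintenance on this
 * alias hits the same cache lines as the user mapping would.  The MCRR c14
 * range operation cleans and invalidates the D-cache over that single page,
 * and the trailing MCR c7, c10, 4 acts as a write buffer drain / DSB.
 */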
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}

static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

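/*
 * Note: for an aliasing VIPT D-cache there is no cheap way to flush all of
 * an mm's user mappings by address, so flush_cache_mm() and
 * flush_cache_range() below fall back to cleaning and invalidating the
 * entire D-cache (MCR c7, c14, 0) followed by a write buffer drain
 * (MCR c7, c10, 4).
 */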
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_pages(vma, user_addr, pfn, nr);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

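/*
 * Note: FLAG_PA_IS_EXEC is set when the target VMA is executable (the
 * I-cache must be made coherent as well), and FLAG_PA_CORE_IN_MM is set
 * when the calling CPU is one of the CPUs on which the target mm is
 * currently live (see the mm_cpumask() test in flush_ptrace_access()).
 */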
#define FLAG_PA_IS_EXEC 1
#define FLAG_PA_CORE_IN_MM 2

static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

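/*
 * Note: flush_uprobe_xol_access() above forces both flags because the
 * uprobes execute-out-of-line slot is written by the kernel on the current
 * CPU and then executed by the probed task, so both D-cache and I-cache
 * coherency are always required.
 */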
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!folio_test_highmem(folio)) {
		__cpuc_flush_dcache_area(folio_address(folio),
					 folio_size(folio));
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < folio_nr_pages(folio); i++) {
				void *addr = kmap_local_folio(folio,
								i * PAGE_SIZE);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_local(addr);
			}
		} else {
			for (i = 0; i < folio_nr_pages(folio); i++) {
				void *addr = kmap_high_get(folio_page(folio, i));
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(folio_page(folio, i));
				}
			}
		}
	}

	/*
	 * If this is a page cache folio, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with folio->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
}

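/*
 * Note: in the highmem path above, the aliasing case uses kmap_high_get(),
 * which returns (and pins) a kernel address only if the page is currently
 * kmapped.  If it is not mapped, there is no kernel alias through which
 * dirty cache lines could have been left behind, so that page needs no
 * flush.
 */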
static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	pgoff_t pgoff, pgoff_end;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = folio->index;
	pgoff_end = pgoff + folio_nr_pages(folio) - 1;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {
		unsigned long start, offset, pfn;
		unsigned int nr;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (vma->vm_mm != mm)
			continue;
		if (!(vma->vm_flags & VM_MAYSHARE))
			continue;

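		/*
		 * Note: offset below is the folio's first page index relative
		 * to the start of the VMA.  Because it is unsigned, a folio
		 * that starts up to nr - 1 pages before the VMA shows up as
		 * offset > -nr; in that case the leading pages outside the
		 * VMA are trimmed off (pfn/nr adjusted).  Otherwise the flush
		 * start address is advanced into the VMA, and nr is finally
		 * clamped so the flush does not run past vm_end.
		 */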
		start = vma->vm_start;
		pfn = folio_pfn(folio);
		nr = folio_nr_pages(folio);
		offset = pgoff - vma->vm_pgoff;
		if (offset > -nr) {
			pfn -= offset;
			nr += offset;
		} else {
			start += offset * PAGE_SIZE;
		}
		if (start + nr * PAGE_SIZE > vma->vm_end)
			nr = (vma->vm_end - start) / PAGE_SIZE;

		flush_cache_pages(vma, start, pfn, nr);
	}
	flush_dcache_mmap_unlock(mapping);
}

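/*
 * Note: __sync_icache_dcache() is called when a user-space PTE is installed
 * (see set_pte_at()).  PG_dcache_clean acts as a lazy-flush marker: the
 * D-cache side is only flushed here if the bit was not already set, and the
 * I-cache is invalidated for executable mappings.
 */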
#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct folio *folio;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	if (folio_test_reserved(folio))
		return;

	if (cache_is_vipt_aliasing())
		mapping = folio_flush_mapping(folio);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(mapping, folio);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (is_zero_pfn(folio_pfn(folio)))
		return;

	if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
		if (test_bit(PG_dcache_clean, &folio->flags))
			clear_bit(PG_dcache_clean, &folio->flags);
		return;
	}

	mapping = folio_flush_mapping(folio);

	if (!cache_ops_need_broadcast() &&
	    mapping && !folio_mapped(folio))
		clear_bit(PG_dcache_clean, &folio->flags);
	else {
		__flush_dcache_folio(mapping, folio);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, folio);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_folio);

void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);
/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}