/*
 *  linux/include/asm-arm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
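/*
 * Worked example (a sketch, assuming SHMLBA == 4 * PAGE_SIZE with 4K
 * pages, as on aliasing VIPT caches): CACHE_COLOUR(0x40003000) == 3,
 * i.e. the mapping lands in cache colour 3 and may only share cache
 * lines with other mappings of the same colour.
 */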

/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//#  define _CACHE v6
//# endif
#endif

#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//#  define _CACHE v7
//# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1
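/*
 * Usage sketch (see arch/arm/mm/flush.c and arch/arm/mm/fault-armv.c):
 * flush_dcache_page() may defer the clean by setting this bit,
 *
 *	set_bit(PG_dcache_dirty, &page->flags);
 *
 * and update_mmu_cache() tests and clears it before the page becomes
 * visible through a user mapping.
 */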
122 | ||
123 | /* | |
124 | * MM Cache Management | |
125 | * =================== | |
126 | * | |
127 | * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files | |
128 | * implement these methods. | |
129 | * | |
130 | * Start addresses are inclusive and end addresses are exclusive; | |
131 | * start addresses should be rounded down, end addresses up. | |
132 | * | |
133 | * See Documentation/cachetlb.txt for more information. | |
134 | * Please note that the implementation of these, and the required | |
135 | * effects are cache-type (VIVT/VIPT/PIPT) specific. | |
136 | * | |
137 | * flush_cache_kern_all() | |
138 | * | |
139 | * Unconditionally clean and invalidate the entire cache. | |
140 | * | |
141 | * flush_cache_user_mm(mm) | |
142 | * | |
143 | * Clean and invalidate all user space cache entries | |
144 | * before a change of page tables. | |
145 | * | |
146 | * flush_cache_user_range(start, end, flags) | |
147 | * | |
148 | * Clean and invalidate a range of cache entries in the | |
149 | * specified address space before a change of page tables. | |
150 | * - start - user start address (inclusive, page aligned) | |
151 | * - end - user end address (exclusive, page aligned) | |
152 | * - flags - vma->vm_flags field | |
153 | * | |
154 | * coherent_kern_range(start, end) | |
155 | * | |
156 | * Ensure coherency between the Icache and the Dcache in the | |
157 | * region described by start, end. If you have non-snooping | |
158 | * Harvard caches, you need to implement this function. | |
159 | * - start - virtual start address | |
160 | * - end - virtual end address | |
161 | * | |
162 | * DMA Cache Coherency | |
163 | * =================== | |
164 | * | |
165 | * dma_inv_range(start, end) | |
166 | * | |
167 | * Invalidate (discard) the specified virtual address range. | |
168 | * May not write back any entries. If 'start' or 'end' | |
169 | * are not cache line aligned, those lines must be written | |
170 | * back. | |
171 | * - start - virtual start address | |
172 | * - end - virtual end address | |
173 | * | |
174 | * dma_clean_range(start, end) | |
175 | * | |
176 | * Clean (write back) the specified virtual address range. | |
177 | * - start - virtual start address | |
178 | * - end - virtual end address | |
179 | * | |
180 | * dma_flush_range(start, end) | |
181 | * | |
182 | * Clean and invalidate the specified virtual address range. | |
183 | * - start - virtual start address | |
184 | * - end - virtual end address | |
185 | */ | |
186 | ||
187 | struct cpu_cache_fns { | |
188 | void (*flush_kern_all)(void); | |
189 | void (*flush_user_all)(void); | |
190 | void (*flush_user_range)(unsigned long, unsigned long, unsigned int); | |
191 | ||
192 | void (*coherent_kern_range)(unsigned long, unsigned long); | |
193 | void (*coherent_user_range)(unsigned long, unsigned long); | |
194 | void (*flush_kern_dcache_page)(void *); | |
195 | ||
7ae5a761 RK |
196 | void (*dma_inv_range)(const void *, const void *); |
197 | void (*dma_clean_range)(const void *, const void *); | |
198 | void (*dma_flush_range)(const void *, const void *); | |
1da177e4 LT |
199 | }; |
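/*
 * When MULTI_CACHE is defined, the kernel dispatches through a single
 * instance of this structure, filled in at boot; a sketch of how
 * (see setup_processor() in arch/arm/kernel/setup.c, 'list' being the
 * matched proc_info_list):
 *
 *	cpu_cache = *list->cache;
 */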
200 | ||
953233dc CM |
201 | struct outer_cache_fns { |
202 | void (*inv_range)(unsigned long, unsigned long); | |
203 | void (*clean_range)(unsigned long, unsigned long); | |
204 | void (*flush_range)(unsigned long, unsigned long); | |
205 | }; | |
206 | ||
1da177e4 LT |
207 | /* |
208 | * Select the calling method | |
209 | */ | |
210 | #ifdef MULTI_CACHE | |
211 | ||
212 | extern struct cpu_cache_fns cpu_cache; | |
213 | ||
214 | #define __cpuc_flush_kern_all cpu_cache.flush_kern_all | |
215 | #define __cpuc_flush_user_all cpu_cache.flush_user_all | |
216 | #define __cpuc_flush_user_range cpu_cache.flush_user_range | |
217 | #define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range | |
218 | #define __cpuc_coherent_user_range cpu_cache.coherent_user_range | |
219 | #define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page | |
220 | ||
221 | /* | |
222 | * These are private to the dma-mapping API. Do not use directly. | |
223 | * Their sole purpose is to ensure that data held in the cache | |
224 | * is visible to DMA, or data written by DMA to system memory is | |
225 | * visible to the CPU. | |
226 | */ | |
227 | #define dmac_inv_range cpu_cache.dma_inv_range | |
228 | #define dmac_clean_range cpu_cache.dma_clean_range | |
229 | #define dmac_flush_range cpu_cache.dma_flush_range | |
230 | ||
231 | #else | |
232 | ||
233 | #define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) | |
234 | #define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) | |
235 | #define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) | |
236 | #define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) | |
237 | #define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) | |
238 | #define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page) | |
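/*
 * Illustration of the __glue() token pasting (a sketch, assuming a
 * kernel built for a single v4wb cache, i.e. _CACHE is "v4wb"):
 *
 *	__cpuc_flush_kern_all  ->  v4wb_flush_kern_cache_all
 *
 * with the target implemented in arch/arm/mm/cache-v4wb.S.
 */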
239 | ||
240 | extern void __cpuc_flush_kern_all(void); | |
241 | extern void __cpuc_flush_user_all(void); | |
242 | extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int); | |
243 | extern void __cpuc_coherent_kern_range(unsigned long, unsigned long); | |
244 | extern void __cpuc_coherent_user_range(unsigned long, unsigned long); | |
245 | extern void __cpuc_flush_dcache_page(void *); | |
246 | ||
247 | /* | |
248 | * These are private to the dma-mapping API. Do not use directly. | |
249 | * Their sole purpose is to ensure that data held in the cache | |
250 | * is visible to DMA, or data written by DMA to system memory is | |
251 | * visible to the CPU. | |
252 | */ | |
253 | #define dmac_inv_range __glue(_CACHE,_dma_inv_range) | |
254 | #define dmac_clean_range __glue(_CACHE,_dma_clean_range) | |
255 | #define dmac_flush_range __glue(_CACHE,_dma_flush_range) | |
256 | ||
7ae5a761 RK |
257 | extern void dmac_inv_range(const void *, const void *); |
258 | extern void dmac_clean_range(const void *, const void *); | |
259 | extern void dmac_flush_range(const void *, const void *); | |
1da177e4 LT |
260 | |
261 | #endif | |
262 | ||
953233dc CM |
263 | #ifdef CONFIG_OUTER_CACHE |
264 | ||
265 | extern struct outer_cache_fns outer_cache; | |
266 | ||
267 | static inline void outer_inv_range(unsigned long start, unsigned long end) | |
268 | { | |
269 | if (outer_cache.inv_range) | |
270 | outer_cache.inv_range(start, end); | |
271 | } | |
272 | static inline void outer_clean_range(unsigned long start, unsigned long end) | |
273 | { | |
274 | if (outer_cache.clean_range) | |
275 | outer_cache.clean_range(start, end); | |
276 | } | |
277 | static inline void outer_flush_range(unsigned long start, unsigned long end) | |
278 | { | |
279 | if (outer_cache.flush_range) | |
280 | outer_cache.flush_range(start, end); | |
281 | } | |
282 | ||
283 | #else | |
284 | ||
285 | static inline void outer_inv_range(unsigned long start, unsigned long end) | |
286 | { } | |
287 | static inline void outer_clean_range(unsigned long start, unsigned long end) | |
288 | { } | |
289 | static inline void outer_flush_range(unsigned long start, unsigned long end) | |
290 | { } | |
291 | ||
292 | #endif | |
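/*
 * A sketch of how the inner and outer maintenance combine when making
 * a buffer visible to a device (the dma-mapping code is the real
 * caller; 'buf' and 'size' are hypothetical).  Note that the outer
 * (L2) hooks take physical addresses:
 *
 *	dmac_clean_range(buf, buf + size);
 *	outer_clean_range(virt_to_phys(buf), virt_to_phys(buf + size));
 */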
293 | ||
1da177e4 LT |
294 | /* |
295 | * flush_cache_vmap() is used when creating mappings (eg, via vmap, | |
296 | * vmalloc, ioremap etc) in kernel space for pages. Since the | |
297 | * direct-mappings of these pages may contain cached data, we need | |
298 | * to do a full cache flush to ensure that writebacks don't corrupt | |
299 | * data placed into these pages via the new mappings. | |
300 | */ | |
301 | #define flush_cache_vmap(start, end) flush_cache_all() | |
302 | #define flush_cache_vunmap(start, end) flush_cache_all() | |
303 | ||
304 | /* | |
305 | * Copy user data from/to a page which is mapped into a different | |
306 | * processes address space. Really, we want to allow our "user | |
307 | * space" model to handle this. | |
308 | */ | |
309 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | |
310 | do { \ | |
1da177e4 | 311 | memcpy(dst, src, len); \ |
a188ad2b | 312 | flush_ptrace_access(vma, page, vaddr, dst, len, 1);\ |
1da177e4 LT |
313 | } while (0) |
314 | ||
315 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | |
316 | do { \ | |
1da177e4 LT |
317 | memcpy(dst, src, len); \ |
318 | } while (0) | |
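/*
 * The canonical caller is access_process_vm() in mm/memory.c, which
 * kmaps the target page and then, for a ptrace write, does roughly
 * (names as in that file):
 *
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *
 * so that the instruction cache is made coherent with the new data.
 */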
319 | ||
320 | /* | |
321 | * Convert calls to our calling convention. | |
322 | */ | |
323 | #define flush_cache_all() __cpuc_flush_kern_all() | |
d7b6b358 | 324 | #ifndef CONFIG_CPU_CACHE_VIPT |
1da177e4 LT |
325 | static inline void flush_cache_mm(struct mm_struct *mm) |
326 | { | |
327 | if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) | |
328 | __cpuc_flush_user_all(); | |
329 | } | |
330 | ||
331 | static inline void | |
332 | flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | |
333 | { | |
334 | if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) | |
335 | __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), | |
336 | vma->vm_flags); | |
337 | } | |
338 | ||
339 | static inline void | |
340 | flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) | |
341 | { | |
342 | if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { | |
343 | unsigned long addr = user_addr & PAGE_MASK; | |
344 | __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); | |
345 | } | |
346 | } | |
a188ad2b GD |
347 | |
348 | static inline void | |
349 | flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | |
350 | unsigned long uaddr, void *kaddr, | |
351 | unsigned long len, int write) | |
352 | { | |
353 | if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { | |
354 | unsigned long addr = (unsigned long)kaddr; | |
355 | __cpuc_coherent_kern_range(addr, addr + len); | |
356 | } | |
357 | } | |
d7b6b358 RK |
358 | #else |
359 | extern void flush_cache_mm(struct mm_struct *mm); | |
360 | extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); | |
361 | extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn); | |
a188ad2b GD |
362 | extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, |
363 | unsigned long uaddr, void *kaddr, | |
364 | unsigned long len, int write); | |
d7b6b358 | 365 | #endif |
1da177e4 | 366 | |
ec8c0446 RB |
367 | #define flush_cache_dup_mm(mm) flush_cache_mm(mm) |
368 | ||
1da177e4 LT |
369 | /* |
370 | * flush_cache_user_range is used when we want to ensure that the | |
371 | * Harvard caches are synchronised for the user space address range. | |
372 | * This is used for the ARM private sys_cacheflush system call. | |
373 | */ | |
374 | #define flush_cache_user_range(vma,start,end) \ | |
375 | __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end)) | |
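/*
 * A JIT, for example, reaches this via the ARM-private cacheflush
 * syscall after writing code into a buffer; the userspace side looks
 * roughly like (a sketch):
 *
 *	syscall(__ARM_NR_cacheflush, start, end, 0);
 */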
376 | ||
377 | /* | |
378 | * Perform necessary cache operations to ensure that data previously | |
379 | * stored within this range of addresses can be executed by the CPU. | |
380 | */ | |
381 | #define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e) | |
382 | ||
383 | /* | |
384 | * Perform necessary cache operations to ensure that the TLB will | |
385 | * see data written in the specified area. | |
386 | */ | |
387 | #define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size) | |
388 | ||
389 | /* | |
390 | * flush_dcache_page is used when the kernel has written to the page | |
391 | * cache page at virtual address page->virtual. | |
392 | * | |
393 | * If this page isn't mapped (ie, page_mapping == NULL), or it might | |
394 | * have userspace mappings, then we _must_ always clean + invalidate | |
395 | * the dcache entries associated with the kernel mapping. | |
396 | * | |
397 | * Otherwise we can defer the operation, and clean the cache when we are | |
398 | * about to change to user space. This is the same method as used on SPARC64. | |
399 | * See update_mmu_cache for the user space part. | |
400 | */ | |
401 | extern void flush_dcache_page(struct page *); | |
402 | ||
1c9d3df5 RP |
403 | extern void __flush_dcache_page(struct address_space *mapping, struct page *page); |
404 | ||
6020dff0 RK |
405 | #define ARCH_HAS_FLUSH_ANON_PAGE |
406 | static inline void flush_anon_page(struct vm_area_struct *vma, | |
407 | struct page *page, unsigned long vmaddr) | |
408 | { | |
409 | extern void __flush_anon_page(struct vm_area_struct *vma, | |
410 | struct page *, unsigned long); | |
411 | if (PageAnon(page)) | |
412 | __flush_anon_page(vma, page, vmaddr); | |
413 | } | |
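/*
 * Generic code (e.g. follow_page()/get_user_pages() in mm/memory.c)
 * calls flush_anon_page() before the kernel reads an anonymous page
 * through its kernel mapping, since such a page may be dirty only in
 * the user mapping's cache lines.
 */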
414 | ||
1da177e4 LT |
415 | #define flush_dcache_mmap_lock(mapping) \ |
416 | write_lock_irq(&(mapping)->tree_lock) | |
417 | #define flush_dcache_mmap_unlock(mapping) \ | |
418 | write_unlock_irq(&(mapping)->tree_lock) | |
419 | ||
420 | #define flush_icache_user_range(vma,page,addr,len) \ | |
421 | flush_dcache_page(page) | |
422 | ||
423 | /* | |
424 | * We don't appear to need to do anything here. In fact, if we did, we'd | |
425 | * duplicate cache flushing elsewhere performed by flush_dcache_page(). | |
426 | */ | |
427 | #define flush_icache_page(vma,page) do { } while (0) | |
428 | ||
aaf83acb CM |
429 | #define __cacheid_present(val) (val != read_cpuid(CPUID_ID)) |
430 | #define __cacheid_type_v7(val) ((val & (7 << 29)) == (4 << 29)) | |
431 | ||
432 | #define __cacheid_vivt_prev7(val) ((val & (15 << 25)) != (14 << 25)) | |
433 | #define __cacheid_vipt_prev7(val) ((val & (15 << 25)) == (14 << 25)) | |
434 | #define __cacheid_vipt_nonaliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25)) | |
435 | #define __cacheid_vipt_aliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23)) | |
436 | ||
437 | #define __cacheid_vivt(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val)) | |
438 | #define __cacheid_vipt(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val)) | |
439 | #define __cacheid_vipt_nonaliasing(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val)) | |
440 | #define __cacheid_vipt_aliasing(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val)) | |
065cf519 | 441 | #define __cacheid_vivt_asid_tagged_instr(val) (__cacheid_type_v7(val) ? ((val & (3 << 14)) == (1 << 14)) : 0) |
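/*
 * Decoding sketch for the pre-v7 cache type register, as tested above:
 * bits [28:25] == 0xe select a VIPT data cache (anything else reads as
 * VIVT), and bit 23 set on a VIPT cache means it aliases.  ARMv7
 * (bits [31:29] == 0x4) redefined the register, so the answers there
 * are hard-coded (VIPT, non-aliasing), with bits [15:14] == 0x1
 * indicating an ASID-tagged VIVT instruction cache.
 */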

#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			1
#define cache_is_vipt()			0
#define cache_is_vipt_nonaliasing()	0
#define cache_is_vipt_aliasing()	0
#define icache_is_vivt_asid_tagged()	0

#elif defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			0
#define cache_is_vipt()			1
#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_aliasing(__val);			\
	})

#define icache_is_vivt_asid_tagged()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vivt_asid_tagged_instr(__val);	\
	})

#else

#define cache_is_vivt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		(!__cacheid_present(__val)) || __cacheid_vivt(__val); \
	})

#define cache_is_vipt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) && __cacheid_vipt(__val); \
	})

#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
			__cacheid_vipt_nonaliasing(__val);	\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
			__cacheid_vipt_aliasing(__val);		\
	})

#define icache_is_vivt_asid_tagged()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
			__cacheid_vivt_asid_tagged_instr(__val); \
	})

#endif

#endif