/*
 *  linux/include/asm-arm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>

#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
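/*
 * Worked example (illustrative, assuming 4 KiB pages and an SHMLBA of
 * four pages): CACHE_COLOUR() then yields a value in 0..3, i.e. which
 * of the four possible page "colours" a virtual address has.  Two
 * mappings of the same page can alias in a VIPT cache only when their
 * colours differ, which is why shared mappings are placed at
 * SHMLBA-aligned addresses.
 */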

/*
 * Cache Model
 * ===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710) || \
    defined(CONFIG_CPU_ARM740T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_ARM720T) || defined(CONFIG_CPU_ARM7TDMI) || \
    defined(CONFIG_CPU_ARM9TDMI)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/*
 * MM Cache Management
 * ===================
 *
 * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 * implement these methods.
 *
 * Start addresses are inclusive and end addresses are exclusive;
 * start addresses should be rounded down, end addresses up.
 *
 * See Documentation/cachetlb.txt for more information.
 * Please note that the implementation of these, and the required
 * effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 * flush_cache_kern_all()
 *
 *	Unconditionally clean and invalidate the entire cache.
 *
 * flush_cache_user_mm(mm)
 *
 *	Clean and invalidate all user space cache entries
 *	before a change of page tables.
 *
 * flush_cache_user_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address space before a change of page tables.
 *	- start - user start address (inclusive, page aligned)
 *	- end   - user end address   (exclusive, page aligned)
 *	- flags - vma->vm_flags field
 *
 * coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *	- start - virtual start address
 *	- end   - virtual end address
 *
 * DMA Cache Coherency
 * ===================
 *
 * dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *	- start - virtual start address
 *	- end   - virtual end address
 *
 * dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *	- start - virtual start address
 *	- end   - virtual end address
 *
 * dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *	- start - virtual start address
 *	- end   - virtual end address
 */

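/*
 * Illustrative sketch only (not part of this interface): how the DMA
 * operations documented above would typically bracket a transfer.
 * Drivers should normally use the dma-mapping API rather than calling
 * these routines directly; the buffer below is a made-up example.
 *
 *	unsigned long start = (unsigned long)buf;
 *
 *	dmac_clean_range(start, start + len);	// CPU -> device: write back dirty lines
 *	// ... start the device reading the buffer via DMA ...
 *
 *	dmac_inv_range(start, start + len);	// device -> CPU: discard stale lines
 *	// ... the CPU may now safely read data the device wrote ...
 */
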
struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(unsigned long, unsigned long);
	void (*dma_clean_range)(unsigned long, unsigned long);
	void (*dma_flush_range)(unsigned long, unsigned long);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(unsigned long, unsigned long);
extern void dmac_clean_range(unsigned long, unsigned long);
extern void dmac_flush_range(unsigned long, unsigned long);

#endif

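/*
 * Example of how the selection above expands (illustrative): on a
 * single-cache build with _CACHE defined to v4wb, __cpuc_flush_kern_all
 * resolves via __glue() to a direct call to v4wb_flush_kern_cache_all();
 * on a MULTI_CACHE build it becomes the indirect call
 * cpu_cache.flush_kern_all().
 */
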
/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  Since the
 * direct-mappings of these pages may contain cached data, we need
 * to do a full cache flush to ensure that writebacks don't corrupt
 * data placed into these pages via the new mappings.
 */
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)

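/*
 * Illustrative sketch only (variable names are made up): a ptrace-style
 * writer that modifies another task's page typically maps the page and
 * then uses copy_to_user_page() so the caches are kept coherent:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *	kunmap(page);
 */
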
/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()
#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			unsigned long uaddr, void *kaddr,
			unsigned long len, int write)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

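/*
 * Illustrative sketch only: a caller that writes instructions into
 * memory (e.g. a module loader or a kprobes-like code patcher) must
 * make them visible to the instruction stream before executing them:
 *
 *	memcpy(code, insns, size);			// write the new instructions
 *	flush_icache_range((unsigned long)code,
 *			   (unsigned long)code + size);	// sync Dcache with Icache
 *	// ... it is now safe to branch to 'code' ...
 */
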
/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
extern void flush_dcache_page(struct page *);

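/*
 * Simplified sketch of the deferral described above; the real logic
 * lives in arch/arm/mm/, and the helper named here is illustrative
 * only:
 *
 *	void flush_dcache_page(struct page *page)
 *	{
 *		struct address_space *mapping = page_mapping(page);
 *
 *		if (mapping && !mapping_mapped(mapping))
 *			set_bit(PG_dcache_dirty, &page->flags);	// defer to update_mmu_cache()
 *		else
 *			__flush_dcache_page(mapping, page);	// clean + invalidate now
 *	}
 */
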
#define flush_dcache_mmap_lock(mapping) \
	write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	write_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

#define __cacheid_present(val)			(val != read_cpuid(CPUID_ID))
#define __cacheid_vivt(val)			((val & (15 << 25)) != (14 << 25))
#define __cacheid_vipt(val)			((val & (15 << 25)) == (14 << 25))
#define __cacheid_vipt_nonaliasing(val)		((val & (15 << 25 | 1 << 23)) == (14 << 25))
#define __cacheid_vipt_aliasing(val)		((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))

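/*
 * Worked example of the decoding above: bits [28:25] of the cache type
 * register hold the cache class; a value of 14 (0xe) denotes a VIPT
 * cache, and anything else is treated as VIVT.  For VIPT caches, bit 23
 * distinguishes the aliasing case (bit set) from the non-aliasing case
 * (bit clear).
 */
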
#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			1
#define cache_is_vipt()			0
#define cache_is_vipt_nonaliasing()	0
#define cache_is_vipt_aliasing()	0

#elif defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			0
#define cache_is_vipt()			1
#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_aliasing(__val);			\
	})

#else

#define cache_is_vivt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		(!__cacheid_present(__val)) || __cacheid_vivt(__val); \
	})

#define cache_is_vipt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) && __cacheid_vipt(__val); \
	})

#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		__cacheid_vipt_aliasing(__val);			\
	})

#endif

#endif