/*
 *  linux/include/asm-arm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/glue.h>
#include <asm/shmparam.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

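/*
 * Worked example (a sketch, assuming SHMLBA == 4 * PAGE_SIZE and
 * PAGE_SHIFT == 12, the usual values when an aliasing VIPT D-cache
 * is present):
 *
 *	CACHE_COLOUR(0x40003000) == (0x40003000 & 0x3fff) >> 12 == 3
 *
 * i.e. the macro yields which of the four possible page "colours" a
 * virtual address falls in; two virtual mappings of the same physical
 * page only share cache lines safely when their colours match.
 */
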
/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_ARM720T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//#  define _CACHE v6
//# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif

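/*
 * Worked example: in a kernel built for CONFIG_CPU_SA1100 alone,
 * _CACHE is v4wb, so (via the asm/glue.h token pasting used below)
 *
 *	__cpuc_flush_kern_all	->	v4wb_flush_kern_cache_all()
 *
 * a direct call.  Enabling a second CPU with a different cache model
 * (say CONFIG_CPU_XSCALE as well) defines MULTI_CACHE instead, and
 * the same macro becomes cpu_cache.flush_kern_all, an indirect call
 * through the run-time function table declared further down.
 */
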
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_cache_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_user_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_cache_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(unsigned long, unsigned long);
	void (*dma_clean_range)(unsigned long, unsigned long);
	void (*dma_flush_range)(unsigned long, unsigned long);
};

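/*
 * A sketch of how this table is populated when MULTI_CACHE is in
 * effect (the real code lives in arch/arm/kernel/setup.c): at boot,
 * setup_processor() matches the CPU ID against the compiled-in
 * proc_info entries and copies the matching function table, roughly
 *
 *	cpu_cache = *list->cache;
 *
 * after which every __cpuc_* macro below dispatches through it.
 */
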
/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(unsigned long, unsigned long);
extern void dmac_clean_range(unsigned long, unsigned long);
extern void dmac_flush_range(unsigned long, unsigned long);

#endif

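/*
 * Usage sketch (hypothetical driver code, not part of this header):
 * before a device reads a buffer the CPU has written, clean it; before
 * the CPU reads data a device has written, invalidate it:
 *
 *	dmac_clean_range((unsigned long)buf, (unsigned long)buf + len);
 *	... start DMA from memory to the device ...
 *
 *	dmac_inv_range((unsigned long)buf, (unsigned long)buf + len);
 *	... the CPU may now read what the device wrote ...
 *
 * In practice drivers should go through the dma-mapping API, which
 * performs these operations internally.
 */
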
/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  Since the
 * direct-mappings of these pages may contain cached data, we need
 * to do a full cache flush to ensure that writebacks don't corrupt
 * data placed into these pages via the new mappings.
 */
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
		flush_dcache_page(page);			\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
	} while (0)

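/*
 * Example (a sketch): access_process_vm() uses these helpers when,
 * for instance, ptrace() pokes a breakpoint into another task's text
 * page through its kernel mapping, roughly
 *
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *
 * The flushes around the memcpy keep the traced task's view of the
 * page coherent with the kernel's write.
 */
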
/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

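/*
 * Example (a sketch): a user-space JIT that has just written
 * instructions into a buffer invokes the private syscall, which the
 * kernel services roughly as
 *
 *	flush_cache_user_range(vma, start, end);
 *
 * so the new code is visible to the I-cache before it is executed.
 */
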
/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

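/*
 * Example (a sketch): the module loader does essentially this after
 * copying a module's code into place, so instructions just written
 * through the D-cache become visible to the I-cache:
 *
 *	flush_icache_range((unsigned long)addr,
 *			   (unsigned long)addr + size);
 *
 * where "addr" and "size" stand for the module core's base and length
 * (hypothetical names for illustration).
 */
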
/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

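/*
 * Example (a sketch): the page table code cleans newly written tables
 * so the MMU's hardware walker, which fetches them straight from
 * memory, sees the updates, roughly
 *
 *	clean_dcache_area(pgd, PTRS_PER_PGD * sizeof(pgd_t));
 *
 * (illustrative arguments; the real callers live in arch/arm/mm.)
 */
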
/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
extern void flush_dcache_page(struct page *);

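/*
 * Sketch of the deferral (the real code is in arch/arm/mm/flush.c):
 * when the page has no user mappings yet, flush_dcache_page() merely
 * marks it with the PG_dcache_dirty flag defined above, roughly
 *
 *	if (mapping && !mapping_mapped(mapping))
 *		set_bit(PG_dcache_dirty, &page->flags);
 *	else
 *		__flush_dcache_page(mapping, page);
 *
 * and update_mmu_cache() tests and clears the bit when the page is
 * later mapped into user space.
 */
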
#define flush_dcache_mmap_lock(mapping) \
	write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	write_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

#define __cacheid_present(val)			(val != read_cpuid(CPUID_ID))
#define __cacheid_vivt(val)			((val & (15 << 25)) != (14 << 25))
#define __cacheid_vipt(val)			((val & (15 << 25)) == (14 << 25))
#define __cacheid_vipt_nonaliasing(val)		((val & (15 << 25 | 1 << 23)) == (14 << 25))
#define __cacheid_vipt_aliasing(val)		((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))

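/*
 * Decoding sketch: bits [28:25] of the cache type register hold the
 * cache class; only the value 14 (0b1110) denotes a VIPT cache, and
 * anything else is treated as VIVT.  On CPUs with no cache type
 * register, reading it returns the main ID register value instead,
 * which is why __cacheid_present() compares against CPUID_ID.  Bit 23
 * is the page-colouring bit: when set, a cache way is larger than a
 * page, so the VIPT D-cache can alias.
 */
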
#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			1
#define cache_is_vipt()			0
#define cache_is_vipt_nonaliasing()	0
#define cache_is_vipt_aliasing()	0

#elif defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			0
#define cache_is_vipt()			1
#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_aliasing(__val);			\
	})

#else

#define cache_is_vivt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		(!__cacheid_present(__val)) || __cacheid_vivt(__val); \
	})

#define cache_is_vipt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) && __cacheid_vipt(__val); \
	})

#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		__cacheid_vipt_aliasing(__val);			\
	})

#endif

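/*
 * Usage sketch: callers pick a flushing strategy at run time, e.g.
 *
 *	if (cache_is_vipt_aliasing())
 *		flush_pfn_alias(pfn, user_addr);
 *
 * (flush_pfn_alias() as in arch/arm/mm/flush.c).  When the kernel is
 * configured for a single cache model, the constant variants above
 * let the compiler discard the unused branches entirely.
 */
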
#endif