/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementations of these functions, and
 *	their required effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_kern_louis()
 *
 *		Flush data cache levels up to the level of unification
 *		inner shareable and invalidate the I-cache.
 *		Only needed from v7 onwards, falls back to flush_cache_all()
 *		for all other processor versions.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */

struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	int  (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)

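/*
 * Illustrative sketch (an assumption, not kernel code): paths such as
 * access_process_vm()/ptrace use these when poking another task's
 * pages, roughly:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *	kunmap(page);
 */
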
/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));

/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) &&					\
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) ||	\
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
	dsb(ishst);
}

/*
 * Flush caches up to Level of Unification Inner Shareable
 */
#define flush_cache_louis()		__cpuc_flush_kern_louis()

#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(s,e)	__cpuc_coherent_user_range(s,e)

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
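
/*
 * Illustrative sketch (an assumption, not part of this header): code
 * that writes instructions at runtime, such as a JIT or code patcher,
 * must make them visible to instruction fetch before executing them.
 */
static inline void __example_flush_new_insns(void *addr, size_t len)
{
	/* Clean the D-cache and invalidate the I-cache over the range. */
	flush_icache_range((unsigned long)addr, (unsigned long)addr + len);
}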

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

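/*
 * Illustrative sketch (an assumption, not defined here): the typical
 * caller pattern after writing a page cache page via its kernel
 * mapping:
 *
 *	kaddr = kmap_atomic(page);
 *	memcpy(kaddr, data, len);
 *	kunmap_atomic(kaddr);
 *	flush_dcache_page(page);
 */
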
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
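
/*
 * Illustrative sketch (an assumption): I/O performed through a vmap
 * alias is bracketed with these helpers, per Documentation/cachetlb.txt:
 *
 *	flush_kernel_vmap_range(vaddr, len);
 *	... hand the underlying pages to the device ...
 *	invalidate_kernel_vmap_range(vaddr, len);
 *	... the CPU may now safely read the data via vaddr ...
 */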

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb(ishst);
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}
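
/*
 * Illustrative sketch (an assumption): the core vmalloc/vmap code is
 * the expected caller, roughly:
 *
 *	... install the new PTEs for the mapping ...
 *	flush_cache_vmap(start, end);
 *	...
 *	flush_cache_vunmap(start, end);
 *	... tear the PTEs down ...
 */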

/*
 * Memory synchronization helpers for mixed cached vs non cached accesses.
 *
 * Some synchronization algorithms have to set states in memory with the
 * cache enabled or disabled depending on the code path.  It is crucial
 * to always ensure proper cache maintenance to update main memory right
 * away in that case.
 *
 * Any cached write must be followed by a cache clean operation.
 * Any cached read must be preceded by a cache invalidate operation.
 * Yet, in the read case, a cache flush, i.e. an atomic clean+invalidate
 * operation, is needed to avoid discarding possible concurrent writes
 * to the accessed memory.
 *
 * Also, in order to prevent a cached writer from interfering with an
 * adjacent non-cached writer, each state variable must be located in
 * its own separate cache line.
 */

/*
 * This needs to be >= the max cache writeback size of all
 * supported platforms included in the current kernel configuration.
 * This is used to align state variables to their own cache lines.
 */
#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)

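/*
 * Illustrative sketch (an assumption): giving a shared state variable
 * its own writeback granule so a cached writer cannot interfere with
 * an adjacent non-cached writer.
 */
struct __example_shared_state {
	unsigned long mailbox;
} __attribute__((__aligned__(__CACHE_WRITEBACK_GRANULE)));
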
/*
 * There is no __cpuc_clean_dcache_area, but we use the name anyway
 * for clarity of intent, and alias it to __cpuc_flush_dcache_area.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area

/*
 * Ensure preceding writes to *p by this CPU are visible to
 * subsequent reads by other CPUs:
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}

/*
 * Ensure preceding writes to *p by other CPUs are visible to
 * subsequent reads by this CPU.  We must be careful not to
 * discard data simultaneously written by another CPU, hence the
 * usage of flush rather than invalidate operations.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Ensure dirty data migrated from other CPUs into our cache
		 * are cleaned out safely before the outer cache is cleaned:
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* Clean and invalidate stale data for *p from outer ... */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* ... and inner cache: */
	__cpuc_flush_dcache_area(_p, size);
}

#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))

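/*
 * Illustrative sketch (an assumption): publishing a value to an
 * observer that reads it with caches disabled, then picking up an
 * acknowledgement that the other side wrote straight to memory.
 */
static inline void __example_publish(unsigned long *flag)
{
	*flag = 1;		/* cached write ...              */
	sync_cache_w(flag);	/* ... pushed out to main memory */
}

static inline unsigned long __example_pick_up(unsigned long *flag)
{
	sync_cache_r(flag);	/* discard our stale cached copy ...  */
	return *flag;		/* ... and read what was written back */
}
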
/*
 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
 * To do so we must:
 *
 * - Clear the SCTLR.C bit to prevent further cache allocations
 * - Flush the desired level of cache
 * - Clear the ACTLR "SMP" bit to disable local coherency
 *
 * ... and so without any intervening memory access in between those steps,
 * not even to the stack.
 *
 * WARNING -- After this has been called:
 *
 * - No ldrex/strex (and similar) instructions must be used.
 * - The CPU is obviously no longer coherent with the other CPUs.
 * - This is unlikely to work as expected if Linux is running non-secure.
 *
 * Note:
 *
 * - This is known to apply to several ARMv7 processor implementations,
 *   however some exceptions may exist.  Caveat emptor.
 *
 * - The clobber list is dictated by the call to v7_flush_dcache_*.
 *   fp is preserved to the stack explicitly prior to disabling the cache
 *   since adding it to the clobber list is incompatible with having
 *   CONFIG_FRAME_POINTER=y.  ip is saved as well if ever r12-clobbering
 *   trampolines are inserted by the linker and to keep sp 64-bit aligned.
 */
#define v7_exit_coherency_flush(level) \
	asm volatile( \
	"stmfd	sp!, {fp, ip} \n\t" \
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
	"isb	\n\t" \
	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
	"isb	\n\t" \
	"dsb	\n\t" \
	"ldmfd	sp!, {fp, ip}" \
	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
	      "r9","r10","lr","memory" )

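/*
 * Illustrative sketch (an assumption): a CPU power-down path would
 * typically invoke this with the cache level to flush, e.g.
 *
 *	v7_exit_coherency_flush(louis);
 *
 * immediately before the CPU is taken out of the coherency domain.
 */
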
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);

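/*
 * Illustrative sketch (an assumption): write-protecting a range of
 * kernel pages and then marking them executable:
 *
 *	set_memory_ro(addr, numpages);
 *	set_memory_x(addr, numpages);
 */
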
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len);
#endif