/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/cputable.h>

/*
 * No cache flushing is required when address mappings are changed,
 * because the caches on PowerPCs are physically addressed.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
#define flush_cache_vmap(start, end) do { asm volatile("ptesync" ::: "memory"); } while (0)
#else
#define flush_cache_vmap(start, end) do { } while (0)
#endif

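/*
 * Context for the ptesync above (an assumption drawn from the generic
 * vmap path, not something this header spells out): mm/vmalloc.c
 * installs the PTEs for a new mapping and then calls
 * flush_cache_vmap(start, end), so hooking the barrier here makes the
 * mapping visible before vmap()/ioremap() hand the address back and
 * the caller dereferences it.
 */
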
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);
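
/*
 * flush_icache_range() is implemented out of line (in assembly in the
 * kernel proper).  The canonical PowerPC sequence it must perform is
 * sketched below for reference; the helper name and the assumption of
 * a uniform L1 line size are illustrative, not this header's code.
 */
#if 0	/* illustrative sketch, not built */
static inline void sketch_flush_icache_range(unsigned long start,
					     unsigned long stop)
{
	unsigned long addr;

	/* Push modified data cache blocks out to memory... */
	for (addr = start & ~(L1_CACHE_BYTES - 1); addr < stop;
	     addr += L1_CACHE_BYTES)
		asm volatile("dcbst 0, %0" : : "r" (addr) : "memory");
	mb();	/* sync: order the dcbst's before the icbi's */

	/* ...then toss the now-stale instruction cache blocks... */
	for (addr = start & ~(L1_CACHE_BYTES - 1); addr < stop;
	     addr += L1_CACHE_BYTES)
		asm volatile("icbi 0, %0" : : "r" (addr) : "memory");

	/* ...and make this CPU refetch instructions. */
	asm volatile("sync; isync" : : : "memory");
}
#endif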
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_icache_page(struct page *page);
#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
extern void __flush_dcache_icache_phys(unsigned long physaddr);
#else
static inline void __flush_dcache_icache_phys(unsigned long physaddr)
{
	BUG();
}
#endif
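/*
 * The physical-address variant lets 32-bit kernels flush highmem
 * pages that have no kernel virtual mapping; 64-bit and BookE
 * configurations are not expected to take that path, hence the BUG()
 * stub (rationale assumed here, not stated in the original header).
 */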

#ifdef CONFIG_PPC32
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbf(addr);
	mb();	/* sync */
}

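/*
 * A hypothetical use of the helper above (buf/len are assumed names,
 * not from this header): before handing a buffer to a non-snooping
 * device,
 *
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *
 * writes the buffer's lines back to memory and discards them, so the
 * device sees current data and the CPU cannot later hit stale lines.
 */
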
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbst(addr);
	mb();	/* sync */
}

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbi(addr);
	mb();	/* sync */
}

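/*
 * The dcbf()/dcbst()/dcbi() helpers used above are defined elsewhere
 * (asm/cache.h in mainline).  Each wraps a single PowerPC cache-block
 * instruction; a minimal sketch of such wrappers, assuming that form
 * (the sketch_ names are illustrative only):
 */
#if 0	/* illustrative sketch, not built */
#define sketch_dcbf(addr)	asm volatile("dcbf 0, %0"  : : "r" (addr) : "memory")
#define sketch_dcbst(addr)	asm volatile("dcbst 0, %0" : : "r" (addr) : "memory")
#define sketch_dcbi(addr)	asm volatile("dcbi 0, %0"  : : "r" (addr) : "memory")
#endif
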
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
extern void flush_dcache_range(unsigned long start, unsigned long stop);
extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
#endif
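
/*
 * copy_to_user_page() is how the rest of the kernel (for example
 * ptrace poking a breakpoint into another process's text via
 * access_process_vm()) modifies a user page: copy the bytes, then
 * flush the icache for that range so stale instructions are not
 * executed.  Plain reads need no flush, hence copy_from_user_page()
 * below is a bare memcpy().
 */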
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_CACHEFLUSH_H */