Commit | Line | Data |
---|---|---|
26ef5c09 DG |
1 | /* |
2 | * This program is free software; you can redistribute it and/or | |
3 | * modify it under the terms of the GNU General Public License | |
4 | * as published by the Free Software Foundation; either version | |
5 | * 2 of the License, or (at your option) any later version. | |
6 | */ | |
7 | #ifndef _ASM_POWERPC_CACHEFLUSH_H | |
8 | #define _ASM_POWERPC_CACHEFLUSH_H | |
9 | ||
10 | #ifdef __KERNEL__ | |
1da177e4 LT |
11 | |
12 | #include <linux/mm.h> | |
13 | #include <asm/cputable.h> | |
14 | ||
15 | /* | |
26ef5c09 DG |
16 | * No cache flushing is required when address mappings are changed, |
17 | * because the caches on PowerPCs are physically addressed. | |
1da177e4 LT |
18 | */ |
/*
 * PowerPC data caches are physically tagged, so remapping a page never
 * requires a flush: all of these address-space flush hooks are no-ops.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
/* vunmap needs no flush either; vmap is handled below (Book3s needs a barrier) */
#define flush_cache_vunmap(start, end)		do { } while (0)
26 | ||
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	/* Order the preceding pte stores before any access through the mapping */
	asm volatile("ptesync" ::: "memory");
}
#else
/* No barrier needed on other platforms; see the no-op flush macros above */
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
#endif
42 | ||
/* This architecture provides a real flush_dcache_page() implementation */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
/* No mapping-wide dcache state to protect, so the mmap lock hooks are no-ops */
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

/* Make instructions in [start, end) visible to the icache (defined in .c) */
extern void flush_icache_range(unsigned long, unsigned long);
/* Flush icache for a user page after writing @len bytes at @addr into it */
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);
/* Flush a page's dcache then invalidate its icache, by kernel VA ... */
extern void __flush_dcache_icache(void *page_va);
/* ... or by struct page */
extern void flush_dcache_icache_page(struct page *page);
#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
/* Flush dcache/icache for a page identified by physical address */
extern void __flush_dcache_icache_phys(unsigned long physaddr);
#else
/*
 * Only classic 32-bit parts provide the physical-address variant;
 * calling it anywhere else is a bug.
 */
static inline void __flush_dcache_icache_phys(unsigned long physaddr)
{
	BUG();
}
#endif
1da177e4 | 62 | |
26ef5c09 | 63 | #ifdef CONFIG_PPC32 |
affe587b CL |
/*
 * Write any modified data cache blocks in [start, stop) out to memory
 * and invalidate them.  The corresponding instruction cache blocks are
 * left alone.
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long bytes = l1_cache_bytes();
	unsigned long shift = l1_cache_shift();
	/* round down to the containing cache line */
	void *p = (void *)(start & ~(bytes - 1));
	/* number of cache lines covering [p, stop) */
	unsigned long nlines = (stop - (unsigned long)p + bytes - 1) >> shift;

	while (nlines--) {
		dcbf(p);
		p += bytes;
	}
	mb();	/* sync */
}
80 | ||
/*
 * Write any modified data cache blocks in [start, stop) out to memory.
 * The cache lines stay valid (in particular no corresponding instruction
 * cache lines are invalidated).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long bytes = l1_cache_bytes();
	unsigned long shift = l1_cache_shift();
	/* round down to the containing cache line */
	void *p = (void *)(start & ~(bytes - 1));
	/* number of cache lines covering [p, stop) */
	unsigned long nlines = (stop - (unsigned long)p + bytes - 1) >> shift;

	while (nlines--) {
		dcbst(p);
		p += bytes;
	}
	mb();	/* sync */
}
98 | ||
/*
 * Like the above, but invalidate the D-cache lines without writing them
 * back.  Used by the 8xx so the core doesn't see stale data written by
 * the CPM (no cache snooping there).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	unsigned long bytes = l1_cache_bytes();
	unsigned long shift = l1_cache_shift();
	/* round down to the containing cache line */
	void *p = (void *)(start & ~(bytes - 1));
	/* number of cache lines covering [p, stop) */
	unsigned long nlines = (stop - (unsigned long)p + bytes - 1) >> shift;

	while (nlines--) {
		dcbi(p);
		p += bytes;
	}
	mb();	/* sync */
}
117 | ||
26ef5c09 DG |
118 | #endif /* CONFIG_PPC32 */ |
119 | #ifdef CONFIG_PPC64 | |
affe587b | 120 | extern void flush_dcache_range(unsigned long start, unsigned long stop); |
26ef5c09 | 121 | #endif |
1da177e4 LT |
122 | |
/*
 * Copy data into a user page, then flush the icache for the touched
 * range — presumably because the destination may hold code that the
 * user will execute (NOTE(review): rationale inferred; confirm against
 * callers in mm/).
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
/* Reading from a user page needs no cache maintenance: plain memcpy */
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
130 | ||
26ef5c09 | 131 | #endif /* __KERNEL__ */ |
1da177e4 | 132 | |
26ef5c09 | 133 | #endif /* _ASM_POWERPC_CACHEFLUSH_H */ |