/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

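/*
 * The local_flush_*() operations run on the calling CPU only. They all
 * default to cache_noop and are pointed at the appropriate CPU family
 * implementation by cpu_cache_init() below; the generic flush_*() entry
 * points further down broadcast them to every CPU via
 * cacheop_on_each_cpu().
 */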
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

static inline void noop__flush_region(void *start, int size)
{
}

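/*
 * Run a cache operation on every online CPU: kick it off on the other
 * CPUs via smp_call_function() and then call it locally, with preemption
 * disabled so we do not migrate between the two steps.
 */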
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                       int wait)
{
        preempt_disable();
        smp_call_function(func, info, wait);
        func(info);
        preempt_enable();
}

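/*
 * With a virtually-indexed (aliasing) D-cache, writing a user page through
 * the kernel's own mapping can leave stale lines under the user's mapping.
 * If the page is currently mapped and not marked dirty, copy through a
 * kmap_coherent() mapping, which shares the user address's cache colour;
 * otherwise copy directly and mark the page PG_dcache_dirty so the flush
 * is deferred until the next __update_cache().
 */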
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        set_bit(PG_dcache_dirty, &page->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        set_bit(PG_dcache_dirty, &page->flags);
        }
}

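/*
 * copy_user_highpage() and clear_user_highpage() touch the page through a
 * kernel mapping. If that kernel address and the user's vaddr land on
 * different cache colours, the freshly written lines are purged so the
 * user mapping never sees stale data.
 */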
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to, KM_USER1);

        if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
            !test_bit(PG_dcache_dirty, &from->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
        } else {
                vfrom = kmap_atomic(from, KM_USER0);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom, KM_USER0);
        }

        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                __flush_purge_region(vto, PAGE_SIZE);

        kunmap_atomic(vto, KM_USER1);
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        clear_page(kaddr);

        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_purge_region(kaddr, PAGE_SIZE);

        kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

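/*
 * Called from update_mmu_cache() when a PTE is installed. If the page was
 * marked PG_dcache_dirty by one of the copy routines above, write back and
 * invalidate its kernel mapping now so the new user mapping starts clean.
 */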
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);

        if (!boot_cpu_data.dcache.n_aliases)
                return;

        if (pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
                if (dirty)
                        __flush_purge_region(page_address(page), PAGE_SIZE);
        }
}

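/*
 * flush_anon_page() helper: make an anonymous page coherent before the
 * kernel accesses it through page_address() while userspace still maps it
 * at vmaddr. Only needed when the two mappings fall on different cache
 * colours; whichever mapping may hold dirty lines gets purged.
 */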
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
                    !test_bit(PG_dcache_dirty, &page->flags)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        /* XXX.. For now kunmap_coherent() does a purge */
                        /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
                        kunmap_coherent(kaddr);
                } else
                        __flush_purge_region((void *)addr, PAGE_SIZE);
        }
}

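/*
 * Generic cache flush entry points. Each one simply broadcasts the
 * corresponding local_flush_*() operation to all CPUs via
 * cacheop_on_each_cpu(), packing multi-argument calls into a
 * struct flusher_data on the stack.
 */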
void flush_cache_all(void)
{
        cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
                      unsigned long pfn)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = addr;
        data.addr2 = pfn;

        cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
        cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
        struct flusher_data data;

        data.vma = NULL;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        /* Nothing uses the VMA, so just pass the struct page along */
        cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
        cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

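/*
 * Work out how many page colours a cache has: alias_mask covers the
 * virtual-index bits above PAGE_SHIFT, and n_aliases is the number of
 * distinct colours. For example, a 16KB cache with 32-byte lines
 * (512 sets, entry_shift = 5) and 4KB pages gives alias_mask = 0x3000
 * and n_aliases = 4; a cache whose index fits within a page has no
 * aliases at all.
 */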
static void compute_alias(struct cache_info *c)
{
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
        printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.icache.ways,
                boot_cpu_data.icache.sets,
                boot_cpu_data.icache.way_incr);
        printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.icache.entry_mask,
                boot_cpu_data.icache.alias_mask,
                boot_cpu_data.icache.n_aliases);
        printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.dcache.ways,
                boot_cpu_data.dcache.sets,
                boot_cpu_data.dcache.way_incr);
        printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.dcache.entry_mask,
                boot_cpu_data.dcache.alias_mask,
                boot_cpu_data.dcache.n_aliases);

        /*
         * Emit Secondary Cache parameters if the CPU has a probed L2.
         */
        if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
                printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                        boot_cpu_data.scache.ways,
                        boot_cpu_data.scache.sets,
                        boot_cpu_data.scache.way_incr);
                printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                        boot_cpu_data.scache.entry_mask,
                        boot_cpu_data.scache.alias_mask,
                        boot_cpu_data.scache.n_aliases);
        }
}

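/*
 * Boot-time cache setup: read the cache enable state from CCR where it
 * exists, compute the alias parameters for each cache, install no-op
 * flush regions as a safe default, and then hand over to the CPU family
 * specific cache initialization which overrides the function pointers.
 */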
void __init cpu_cache_init(void)
{
        unsigned int cache_disabled = 0;

#ifdef CCR
        cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif

        compute_alias(&boot_cpu_data.icache);
        compute_alias(&boot_cpu_data.dcache);
        compute_alias(&boot_cpu_data.scache);

        __flush_wback_region            = noop__flush_region;
        __flush_purge_region            = noop__flush_region;
        __flush_invalidate_region       = noop__flush_region;

        /*
         * No flushing is necessary in the disabled cache case so we can
         * just keep the noop functions in local_flush_..() and __flush_..()
         */
        if (unlikely(cache_disabled))
                goto skip;

        if (boot_cpu_data.family == CPU_FAMILY_SH2) {
                extern void __weak sh2_cache_init(void);

                sh2_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
                extern void __weak sh2a_cache_init(void);

                sh2a_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH3) {
                extern void __weak sh3_cache_init(void);

                sh3_cache_init();

                if ((boot_cpu_data.type == CPU_SH7705) &&
                    (boot_cpu_data.dcache.sets == 512)) {
                        extern void __weak sh7705_cache_init(void);

                        sh7705_cache_init();
                }
        }

        if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
                extern void __weak sh4_cache_init(void);

                sh4_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH5) {
                extern void __weak sh5_cache_init(void);

                sh5_cache_init();
        }

skip:
        emit_cache_params();
}