/* cacheflush.h: FRV cache flushing routines
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
11 | ||
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>
17 | ||
/*
 * Virtually-indexed cache management stubs.
 *
 * The FRV cache is physically indexed, so nothing needs doing when
 * virtual mappings change; every hook here expands to an empty
 * statement.
 */
#define flush_cache_all()			do {} while (0)
#define flush_cache_mm(mm)			do {} while (0)
#define flush_cache_range(mm, start, end)	do {} while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do {} while (0)
#define flush_cache_vmap(start, end)		do {} while (0)
#define flush_cache_vunmap(start, end)		do {} while (0)
#define flush_dcache_mmap_lock(mapping)		do {} while (0)
#define flush_dcache_mmap_unlock(mapping)	do {} while (0)
29 | ||
/*
 * physically-indexed cache management
 * - see arch/frv/lib/cache.S
 *
 * Each routine operates on the byte range [start, end).  Every caller
 * in this header passes an end address as the second argument
 * (addr + PAGE_SIZE, end, start + len), so the parameter is named
 * "end" here; the old name "size" was misleading.
 */
extern void frv_dcache_writeback(unsigned long start, unsigned long end);
extern void frv_cache_invalidate(unsigned long start, unsigned long end);
extern void frv_icache_invalidate(unsigned long start, unsigned long end);
extern void frv_cache_wback_inv(unsigned long start, unsigned long end);
38 | ||
39 | static inline void __flush_cache_all(void) | |
40 | { | |
41 | asm volatile(" dcef @(gr0,gr0),#1 \n" | |
42 | " icei @(gr0,gr0),#1 \n" | |
43 | " membar \n" | |
44 | : : : "memory" | |
45 | ); | |
46 | } | |
47 | ||
48 | /* dcache/icache coherency... */ | |
49 | #ifdef CONFIG_MMU | |
50 | extern void flush_dcache_page(struct page *page); | |
51 | #else | |
52 | static inline void flush_dcache_page(struct page *page) | |
53 | { | |
54 | unsigned long addr = page_to_phys(page); | |
55 | frv_dcache_writeback(addr, addr + PAGE_SIZE); | |
56 | } | |
57 | #endif | |
58 | ||
/*
 * Legacy hook: getting a page's contents to RAM is simply a dcache
 * writeback of that page.
 */
static inline void flush_page_to_ram(struct page *page)
{
	flush_dcache_page(page);
}
63 | ||
/*
 * Make the icache coherent by flushing and invalidating the whole
 * cache; there is no finer-grained whole-icache primitive here.
 */
static inline void flush_icache(void)
{
	__flush_cache_all();
}
68 | ||
/*
 * Ensure instruction fetches see data written in [start, end) by
 * writing the range back and invalidating it.
 */
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	frv_cache_wback_inv(start, end);
}
73 | ||
#ifdef CONFIG_MMU
extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
				    unsigned long start, unsigned long len);
#else
/*
 * No-MMU: user addresses are physical, so the [start, start + len)
 * range can be written back and invalidated directly; vma and page are
 * unused in this configuration.
 */
static inline void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
					   unsigned long start, unsigned long len)
{
	frv_cache_wback_inv(start, start + len);
}
#endif
84 | ||
85 | static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) | |
86 | { | |
87 | flush_icache_user_range(vma, page, page_to_phys(page), PAGE_SIZE); | |
88 | } | |
89 | ||
90 | ||
#endif /* _ASM_CACHEFLUSH_H */