Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _ALPHA_CACHEFLUSH_H |
2 | #define _ALPHA_CACHEFLUSH_H | |
3 | ||
1da177e4 LT |
4 | #include <linux/mm.h> |
5 | ||
/* Caches aren't brain-dead on the Alpha. */
/* All of the data-cache flush hooks required by the generic MM code
   compile to no-ops here; only the icache needs explicit maintenance
   on Alpha (handled separately below). */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)
17 | ||
/* Note that the following two definitions are _highly_ dependent
   on the contexts in which they are used in the kernel.  I personally
   think it is criminal how loosely defined these macros are. */

/* We need to flush the kernel's icache after loading modules.  The
   only other use of this macro is in load_aout_interp which is not
   used on Alpha.

   Note that this definition should *not* be used for userspace
   icache flushing.  While functional, it is _way_ overkill.  The
   icache is tagged with ASNs and it suffices to allocate a new ASN
   for the process. */
#ifndef CONFIG_SMP
/* UP: a single local imb (instruction memory barrier) is enough. */
#define flush_icache_range(start, end)		imb()
#else
/* SMP: stale kernel text may be cached on any CPU; smp_imb()
   (defined elsewhere) presumably propagates the barrier to all of
   them -- verify against arch/alpha SMP code. */
#define flush_icache_range(start, end)		smp_imb()
extern void smp_imb(void);
#endif
36 | ||
37 | /* We need to flush the userspace icache after setting breakpoints in | |
38 | ptrace. | |
39 | ||
40 | Instead of indiscriminately using imb, take advantage of the fact | |
41 | that icache entries are tagged with the ASN and load a new mm context. */ | |
42 | /* ??? Ought to use this in arch/alpha/kernel/signal.c too. */ | |
43 | ||
44 | #ifndef CONFIG_SMP | |
45 | extern void __load_new_mm_context(struct mm_struct *); | |
46 | static inline void | |
47 | flush_icache_user_range(struct vm_area_struct *vma, struct page *page, | |
48 | unsigned long addr, int len) | |
49 | { | |
50 | if (vma->vm_flags & VM_EXEC) { | |
51 | struct mm_struct *mm = vma->vm_mm; | |
52 | if (current->active_mm == mm) | |
53 | __load_new_mm_context(mm); | |
54 | else | |
55 | mm->context[smp_processor_id()] = 0; | |
56 | } | |
57 | } | |
58 | #else | |
59 | extern void flush_icache_user_range(struct vm_area_struct *vma, | |
60 | struct page *page, unsigned long addr, int len); | |
61 | #endif | |
62 | ||
/* This is used only in do_no_page and do_swap_page. */
/* Newly mapped page: delegate to the user-range flush above, which
   filters on VM_EXEC.  The UP implementation ignores addr/len, so
   0/0 suffices there -- NOTE(review): confirm the SMP variant does
   the same. */
#define flush_icache_page(vma, page) \
  flush_icache_user_range((vma), (page), 0, 0)

/* Kernel writing into a user page (e.g. ptrace poke): copy the bytes,
   then invalidate any stale icache entries for the mapping. */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { memcpy(dst, src, len); \
  flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
/* Reading from a user page needs no cache maintenance on Alpha. */
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
  memcpy(dst, src, len)
73 | ||
74 | #endif /* _ALPHA_CACHEFLUSH_H */ |