/*
 * arch/alpha/include/asm/cacheflush.h  (linux-2.6-block.git)
 * As of commit: "block: add helpers to run flush_dcache_page() against
 * a bio and a request's pages"
 */
1#ifndef _ALPHA_CACHEFLUSH_H
2#define _ALPHA_CACHEFLUSH_H
3
1da177e4
LT
4#include <linux/mm.h>
5
/* The caches on Alpha aren't brain-dead: none of the generic
   cache-flushing hooks has any work to do here, so every one of
   them expands to a no-op. */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)
18
/* Note that the following two definitions are _highly_ dependent
   on the contexts in which they are used in the kernel.  I personally
   think it is criminal how loosely defined these macros are. */

/* The kernel icache must be flushed after loading modules.  The only
   other user of this macro is load_aout_interp, which Alpha doesn't
   use.

   Do *not* use this definition for userspace icache flushing: it
   works, but it is _way_ overkill there.  The icache is ASN-tagged,
   so allocating a new ASN for the process is all that's needed. */
#ifdef CONFIG_SMP
extern void smp_imb(void);
#define flush_icache_range(start, end)	smp_imb()
#else
#define flush_icache_range(start, end)	imb()
#endif
37
38/* We need to flush the userspace icache after setting breakpoints in
39 ptrace.
40
41 Instead of indiscriminately using imb, take advantage of the fact
42 that icache entries are tagged with the ASN and load a new mm context. */
43/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */
44
45#ifndef CONFIG_SMP
46extern void __load_new_mm_context(struct mm_struct *);
47static inline void
48flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
49 unsigned long addr, int len)
50{
51 if (vma->vm_flags & VM_EXEC) {
52 struct mm_struct *mm = vma->vm_mm;
53 if (current->active_mm == mm)
54 __load_new_mm_context(mm);
55 else
56 mm->context[smp_processor_id()] = 0;
57 }
58}
59#else
60extern void flush_icache_user_range(struct vm_area_struct *vma,
61 struct page *page, unsigned long addr, int len);
62#endif
63
/* This is used only in do_no_page and do_swap_page. */
#define flush_icache_page(vma, page) \
	flush_icache_user_range((vma), (page), 0, 0)

/* Copy data into / out of a user page (e.g. for ptrace poke/peek).
   A write must also flush the icache via flush_icache_user_range —
   which is a no-op unless the vma is VM_EXEC (see above) — while a
   read needs no flushing at all.
   All arguments are parenthesized in the expansion so that callers
   may pass non-trivial expressions without precedence surprises. */
#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
do {	memcpy((dst), (src), (len));					\
	flush_icache_user_range((vma), (page), (vaddr), (len));		\
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	memcpy((dst), (src), (len))
74
75#endif /* _ALPHA_CACHEFLUSH_H */