block: add helpers to run flush_dcache_page() against a bio and a request's pages
[linux-2.6-block.git] arch/x86/include/asm/cacheflush.h
#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

/*
 * Caches aren't brain-dead on Intel: x86 keeps its data caches coherent
 * in hardware, so all of these flush operations can be no-ops.
 */
static inline void flush_cache_all(void) { }
static inline void flush_cache_mm(struct mm_struct *mm) { }
static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end) { }
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn) { }
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
static inline void flush_dcache_page(struct page *page) { }
static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
static inline void flush_icache_range(unsigned long start,
				      unsigned long end) { }
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page) { }
static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr,
					   unsigned long len) { }
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
static inline void flush_cache_vunmap(unsigned long start,
				      unsigned long end) { }
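
/*
 * Illustrative sketch (not part of the original header): portable code
 * that writes to a page which may also be mapped into user space must
 * still call flush_dcache_page(); on x86 the stub above compiles away,
 * while on architectures with aliasing caches it does real work. The
 * helper below is hypothetical and assumes a lowmem page so that
 * page_address() is valid.
 */
static inline void example_fill_page(struct page *page,
				     const void *buf, size_t len)
{
	memcpy(page_address(page), buf, len);
	flush_dcache_page(page);	/* no-op on x86 */
}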
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, const void *src,
				     unsigned long len)
{
	memcpy(dst, src, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, const void *src,
				       unsigned long len)
{
	memcpy(dst, src, len);
}
#define PG_WC PG_arch_1
PAGEFLAG(WC, WC)

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the WC and Uncached page flags together to track the
 * memory type of pages that have a backing struct page. X86 PAT supports
 * three different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
 * _PAGE_CACHE_UC_MINUS, plus a fourth state where the page's memory type
 * has not been changed from its default (denoted by the value -1).
 * Note we do not support _PAGE_CACHE_UC here.
 *
 * The caller must hold memtype_lock for atomicity.
 */
static inline unsigned long get_page_memtype(struct page *pg)
{
	if (!PageUncached(pg) && !PageWC(pg))
		return -1;
	else if (!PageUncached(pg) && PageWC(pg))
		return _PAGE_CACHE_WC;
	else if (PageUncached(pg) && !PageWC(pg))
		return _PAGE_CACHE_UC_MINUS;
	else
		return _PAGE_CACHE_WB;
}

static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
	switch (memtype) {
	case _PAGE_CACHE_WC:
		ClearPageUncached(pg);
		SetPageWC(pg);
		break;
	case _PAGE_CACHE_UC_MINUS:
		SetPageUncached(pg);
		ClearPageWC(pg);
		break;
	case _PAGE_CACHE_WB:
		SetPageUncached(pg);
		SetPageWC(pg);
		break;
	default:
	case -1:
		ClearPageUncached(pg);
		ClearPageWC(pg);
		break;
	}
}
#else
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
#endif
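
/*
 * Illustrative sketch (not part of the original header): how the two
 * page flags encode the four states tracked above, and how a PAT-side
 * caller might transition a page. memtype_lock lives in the PAT code
 * (arch/x86/mm/pat.c) and is assumed held as required above:
 *
 *	Uncached  WC	meaning
 *	   0	  0	-1 (default, memtype never set)
 *	   0	  1	_PAGE_CACHE_WC
 *	   1	  0	_PAGE_CACHE_UC_MINUS
 *	   1	  1	_PAGE_CACHE_WB
 *
 *	spin_lock(&memtype_lock);
 *	if (get_page_memtype(pg) == -1)
 *		set_page_memtype(pg, _PAGE_CACHE_WB);
 *	spin_unlock(&memtype_lock);
 */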
/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee any particular state for mappings other than the requested
 *   one, beyond that they do not violate the rules of the CPU in the
 *   system. Do not depend on any effects on other mappings; other CPU
 *   models may have more relaxed rules.
 * The caller is required to take care of these.
 */

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
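
/*
 * Illustrative sketch (not part of the original header), using a
 * hypothetical driver buffer: switch a range of the kernel mapping to
 * uncached while a device uses it, then restore write-back. addr must
 * be page-aligned and numpages is the length of the range in pages.
 */
static inline int example_share_buffer_uncached(unsigned long addr,
						int numpages)
{
	int err = set_memory_uc(addr, numpages);

	if (err)
		return err;
	/* ... device performs uncached access to the buffer here ... */
	return set_memory_wb(addr, numpages);
}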
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);

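/*
 * Illustrative sketch (not part of the original header): the *_array
 * variants take a whole set of discontiguous addresses in one call, so
 * the TLB and cache flushing cost is paid once for the batch, e.g. for
 * a driver holding a list of page addresses (NR and addrs[] are
 * hypothetical):
 *
 *	unsigned long addrs[NR];
 *	...
 *	set_memory_array_uc(addrs, NR);
 *	... use the pages via uncached mappings ...
 *	set_memory_array_wb(addrs, NR);
 */
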
/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* functions. See the description of the
 * set_memory_* functions for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is the implicit operation on the 1:1 mapping only,
 * which makes this not a generally useful API.
 *
 * Specifically, many users of the old APIs had a virtual address,
 * called virt_to_page() or vmalloc_to_page() on that address to
 * get a struct page * that the old API required.
 * To convert these cases, use set_memory_*() on the original
 * virtual address; do not use these functions.
 */

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);

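/*
 * Illustrative sketch (not part of the original header): converting a
 * deprecated set_pages_*() call to the preferred set_memory_*() form.
 * vaddr here is a hypothetical kernel virtual address.
 *
 * Deprecated, only affects the 1:1 mapping:
 *	set_pages_uc(virt_to_page(vaddr), 1);
 * Preferred, operates on the virtual address actually in use:
 *	set_memory_uc((unsigned long)vaddr, 1);
 */
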
void clflush_cache_range(void *addr, unsigned int size);

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
	return 0;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */