#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
#define FIRST_USER_ADDRESS 0

#define _PAGE_BIT_PRESENT  0
#define _PAGE_BIT_RW       1
#define _PAGE_BIT_USER     2
#define _PAGE_BIT_PWT      3
#define _PAGE_BIT_PCD      4
#define _PAGE_BIT_ACCESSED 5
#define _PAGE_BIT_DIRTY    6
#define _PAGE_BIT_FILE     6
#define _PAGE_BIT_PSE      7  /* 4 MB (or 2MB) page */
#define _PAGE_BIT_GLOBAL   8  /* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1  9  /* available for programmer */
#define _PAGE_BIT_UNUSED2  10
#define _PAGE_BIT_UNUSED3  11
#define _PAGE_BIT_NX       63 /* No execute: only valid after cpuid check */

/*
 * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a
 * sign-extended value on 32-bit with all 1's in the upper word,
 * which preserves the upper pte values on 64-bit ptes:
 */
#define _PAGE_PRESENT  (_AC(1, L)<<_PAGE_BIT_PRESENT)
#define _PAGE_RW       (_AC(1, L)<<_PAGE_BIT_RW)
#define _PAGE_USER     (_AC(1, L)<<_PAGE_BIT_USER)
#define _PAGE_PWT      (_AC(1, L)<<_PAGE_BIT_PWT)
#define _PAGE_PCD      (_AC(1, L)<<_PAGE_BIT_PCD)
#define _PAGE_ACCESSED (_AC(1, L)<<_PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY    (_AC(1, L)<<_PAGE_BIT_DIRTY)
#define _PAGE_PSE      (_AC(1, L)<<_PAGE_BIT_PSE)     /* 2MB page */
#define _PAGE_GLOBAL   (_AC(1, L)<<_PAGE_BIT_GLOBAL)  /* Global TLB entry */
#define _PAGE_UNUSED1  (_AC(1, L)<<_PAGE_BIT_UNUSED1)
#define _PAGE_UNUSED2  (_AC(1, L)<<_PAGE_BIT_UNUSED2)
#define _PAGE_UNUSED3  (_AC(1, L)<<_PAGE_BIT_UNUSED3)
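
/*
 * Illustrative sketch (an assumption for exposition, not part of this
 * header): why the signed "L" suffix matters on 32-bit PAE, where a pte
 * is 64 bits wide but "long" is only 32.  A signed 32-bit mask such as
 * ~_PAGE_PRESENT sign-extends to 0xfffffffffffffffe when widened to
 * 64 bits, so masking a pte keeps the NX bit and the high half of the
 * frame number; an unsigned ("UL") constant would clear the upper
 * 32 bits instead.  A user-space demonstration of the conversion rule,
 * with hypothetical values, built as a 32-bit binary:
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	int main(void)
 *	{
 *		uint64_t pte = 0x8000000012345063ULL;     // NX | pfn | flags
 *		long sign_mask = ~(1L << 0);              // like ~_AC(1, L)
 *		unsigned long unsign_mask = ~(1UL << 0);  // like ~_AC(1, UL)
 *
 *		printf("%llx\n", (unsigned long long)(pte & sign_mask));   // keeps bit 63
 *		printf("%llx\n", (unsigned long long)(pte & unsign_mask)); // drops bit 63
 *		return 0;
 *	}
 */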

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AC(1, ULL) << _PAGE_BIT_NX)
#else
#define _PAGE_NX 0
#endif

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE     _PAGE_DIRTY /* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE _PAGE_PSE   /* if the user mapped it with PROT_NONE;
                                      pte_present gives true */
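
/*
 * Added note (an assumption about the 32/64-bit sub-headers, shown only
 * as a sketch): pte_present() is expected to test both bits so that
 * PROT_NONE pages still count as present, roughly
 *
 *	pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE)
 *
 * which is what lets _PAGE_PROTNONE reuse the PSE bit while the real
 * present bit is clear.
 */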

#define _PAGE_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE   __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC   __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC     __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY          PAGE_COPY_NOEXEC
#define PAGE_READONLY      __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#ifdef CONFIG_X86_32
#define _PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL (_PAGE_KERNEL_EXEC | _PAGE_NX)

#ifndef __ASSEMBLY__
extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#endif /* __ASSEMBLY__ */
#else
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
#endif
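
/*
 * Added note (hedged; the boot code that fills these in lives outside this
 * header): on 32-bit the NX bit is only usable once the CPU has been probed
 * for no-execute support, so __PAGE_KERNEL and __PAGE_KERNEL_EXEC are
 * ordinary variables set up during boot rather than compile-time constants,
 * while the 64-bit side can bake _PAGE_NX straight into the macros above.
 */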

#define __PAGE_KERNEL_RO               (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX               (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE          (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_VSYSCALL         (__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE            (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC       (__PAGE_KERNEL_EXEC | _PAGE_PSE)

#ifdef CONFIG_X86_32
# define MAKE_GLOBAL(x) __pgprot((x))
#else
# define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
#endif

#define PAGE_KERNEL                  MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO               MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC             MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX               MAKE_GLOBAL(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_NOCACHE          MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE            MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC       MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL         MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)

/* xwr */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
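
/*
 * Added illustration (a sketch of the generic mm side, not code from this
 * header): the core VM builds protection_map[16] from __P000..__P111 and
 * __S000..__S111 and looks a vma's protections up by the low vm_flags bits,
 * roughly
 *
 *	prot = protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 *
 * so a private PROT_READ|PROT_WRITE mapping resolves to __P011 == PAGE_COPY,
 * which is deliberately non-writable so that the first write faults and
 * triggers copy-on-write.
 */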

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))


/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_huge(pte_t pte)  { return pte_val(pte) & _PAGE_PSE; }

static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)   { return __pte(pte_val(pte) & ~_PAGE_DIRTY); }
static inline pte_t pte_mkold(pte_t pte)     { return __pte(pte_val(pte) & ~_PAGE_ACCESSED); }
static inline pte_t pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~_PAGE_RW); }
static inline pte_t pte_mkexec(pte_t pte)    { return __pte(pte_val(pte) & ~_PAGE_NX); }
static inline pte_t pte_mkdirty(pte_t pte)   { return __pte(pte_val(pte) | _PAGE_DIRTY); }
static inline pte_t pte_mkyoung(pte_t pte)   { return __pte(pte_val(pte) | _PAGE_ACCESSED); }
static inline pte_t pte_mkwrite(pte_t pte)   { return __pte(pte_val(pte) | _PAGE_RW); }
static inline pte_t pte_mkhuge(pte_t pte)    { return __pte(pte_val(pte) | _PAGE_PSE); }
static inline pte_t pte_clrhuge(pte_t pte)   { return __pte(pte_val(pte) & ~_PAGE_PSE); }

extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}
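
/*
 * Added note (hedged): __supported_pte_mask is meant to strip flag bits the
 * running CPU cannot use -- most importantly _PAGE_NX when no-execute support
 * is absent -- so callers can pass a "full" pgprot and let pfn_pte() filter
 * it.  A typical construction looks roughly like
 *
 *	pte_t pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 *
 * where the NX bit in PAGE_KERNEL simply disappears on hardware that would
 * otherwise fault on a reserved bit.
 */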

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK & ~_PAGE_NX;
	val |= pgprot_val(newprot) & __supported_pte_mask;

	return __pte(val);
}
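
/*
 * Added usage sketch (assumption; the caller shown lives in generic mm code):
 * pte_modify() keeps the page frame plus the accessed/dirty state selected by
 * _PAGE_CHG_MASK and swaps in everything else from the new protections, which
 * is what an mprotect()-style permission change wants, roughly
 *
 *	ptent = pte_modify(ptent, vm_get_page_prot(vma->vm_flags));
 *	set_pte_at(mm, addr, ptep, ptent);
 */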

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)              native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte) \
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte) \
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)     native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud) native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud) native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)            native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)       do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
#endif /* CONFIG_PARAVIRT */

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to ensure that the flush happens while
 * still holding the same page table lock, so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)       do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
#endif
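
/*
 * Added illustration of the rule above (a sketch only; "ptl" stands in for
 * whatever page table lock the caller already holds): a raw user-PTE bit
 * change is followed by the notification, and with the deferred form the TLB
 * flush must still happen under the same lock --
 *
 *	spin_lock(ptl);
 *	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte); // raw update
 *	pte_update_defer(mm, addr, ptep);                     // notify hypervisor
 *	flush_tlb_page(vma, addr);                            // before unlocking
 *	spin_unlock(ptl);
 *
 * ptep_set_wrprotect() below is the in-tree instance of this pattern, using
 * the immediate pte_update() form.
 */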

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
({ \
	int __changed = !pte_same(*(ptep), entry); \
	if (__changed && dirty) { \
		*ptep = entry; \
		pte_update_defer((vma)->vm_mm, (address), (ptep)); \
		flush_tlb_page(vma, address); \
	} \
	__changed; \
})

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({ \
	int __ret = 0; \
	if (pte_young(*(ptep))) \
		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, \
					   &(ptep)->pte); \
	if (__ret) \
		pte_update((vma)->vm_mm, addr, ptep); \
	__ret; \
})

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep) \
({ \
	int __young; \
	__young = ptep_test_and_clear_young((vma), (address), (ptep)); \
	if (__young) \
		flush_tlb_page(vma, address); \
	__young; \
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */