/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */

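/*
 * Illustration: _PAGE_NEWPAGE and _PAGE_NEWPROT are UML-specific
 * software bits, assumed here (per the helpers further down) to mark
 * work still to be pushed out to the host -- a mapping the host has
 * not mmap()ed yet, or a protection change it has not yet seen via
 * mprotect().  As a worked example, a freshly installed user-writable
 * page carries
 *
 *	_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED
 *	    0x001     |  0x020   |   0x040    |     0x080      = 0x0e1
 *
 * plus _PAGE_NEWPAGE (0x002) until the host mapping exists, i.e. 0x0e3.
 */
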
#ifdef CONFIG_3_LEVEL_PGTABLES
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE	((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#define MODULES_VADDR	VMALLOC_START
#define MODULES_END	VMALLOC_END
#define MODULES_LEN	(MODULES_END - MODULES_VADDR)
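
/*
 * A sketch of the resulting kernel virtual layout, derived from the
 * constants above (exact boundaries are configuration-dependent),
 * lowest addresses first:
 *
 *	0 .. end_iomem			physical memory and iomem
 *	hole of VMALLOC_OFFSET bytes	catches out-of-bounds accesses
 *	VMALLOC_START .. VMALLOC_END	vmalloc, also the module area
 *					(MODULES_VADDR .. MODULES_END)
 *	two guard pages
 *	FIXADDR_START ..		fixmap (PKMAP_BASE sits just below
 *					FIXADDR_START when highmem is used)
 */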

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as a read.  Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
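
/*
 * For orientation (assuming the generic protection-map convention,
 * which this file does not spell out): the three digits are the
 * mapping's execute/write/read bits; __P* rows serve private
 * (copy-on-write) mappings, __S* rows MAP_SHARED ones.  So a
 * PROT_READ|PROT_WRITE private mapping takes __P011 == PAGE_COPY --
 * no _PAGE_RW, so the first write faults and can trigger COW -- while
 * the shared case takes __S011 == PAGE_SHARED with _PAGE_RW set.
 */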

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)  (pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)  (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_USER) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_exec(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_USER) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_write(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_RW) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
	return pte_present(pte) && pte_get_bits(pte, _PAGE_NEWPROT);
}
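
/*
 * A rough sketch of how these two predicates are consumed (assuming
 * the TLB-flush path, e.g. the fix_range() mentioned at set_pte()
 * below; this header itself only defines the bits):
 *
 *	if (pte_newpage(pte))		remap: munmap() + mmap() on the host
 *	else if (pte_newprot(pte))	cheaper: mprotect() on the host
 *
 * pte_newprot() deliberately reports only present ptes: a protection
 * update for an unmapped page means nothing to the host.
 */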

static inline int pte_special(pte_t pte)
{
	return 0;
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPROT);
	return pte;
}
183 | ||
1da177e4 LT |
184 | static inline pte_t pte_mkclean(pte_t pte) |
185 | { | |
186 | pte_clear_bits(pte, _PAGE_DIRTY); | |
187 | return(pte); | |
188 | } | |
189 | ||
190 | static inline pte_t pte_mkold(pte_t pte) | |
191 | { | |
192 | pte_clear_bits(pte, _PAGE_ACCESSED); | |
193 | return(pte); | |
194 | } | |
195 | ||
196 | static inline pte_t pte_wrprotect(pte_t pte) | |
197 | { | |
8892d854 AI |
198 | if (likely(pte_get_bits(pte, _PAGE_RW))) |
199 | pte_clear_bits(pte, _PAGE_RW); | |
200 | else | |
201 | return pte; | |
1da177e4 LT |
202 | return(pte_mknewprot(pte)); |
203 | } | |

static inline pte_t pte_mkread(pte_t pte)
{
	if (unlikely(pte_get_bits(pte, _PAGE_USER)))
		return pte;
	pte_set_bits(pte, _PAGE_USER);
	return pte_mknewprot(pte);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	if (unlikely(pte_get_bits(pte, _PAGE_RW)))
		return pte;
	pte_set_bits(pte, _PAGE_RW);
	return pte_mknewprot(pte);
}
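
/*
 * Note the pattern above: every setter that changes protection bits --
 * pte_wrprotect(), pte_mkread(), pte_mkwrite() -- funnels through
 * pte_mknewprot().  For example,
 *
 *	pte = pte_wrprotect(pte);
 *
 * clears _PAGE_RW and sets _PAGE_NEWPROT, leaving a marker for the
 * flush code to mprotect() the host mapping later.  Pure accounting
 * bits (dirty, young) change no host state and skip the marker.
 */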

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEWPAGE);
	if (pte_present(pte))
		pte_clear_bits(pte, _PAGE_NEWPROT);
	return pte;
}

static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPAGE);
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte;
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
	 * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
	 * mapped pages.
	 */

	*pteptr = pte_mknewpage(*pteptr);
	if (pte_present(*pteptr))
		*pteptr = pte_mknewprot(*pteptr);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *pteptr, pte_t pteval)
{
	set_pte(pteptr, pteval);
}
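
/*
 * Minimal usage sketch (hypothetical caller; page, addr, mm and ptep
 * are assumed to come from the usual fault path):
 *
 *	pte_t pte = mk_pte(page, PAGE_SHARED);
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * Afterwards *ptep carries _PAGE_NEWPAGE (and, being present,
 * _PAGE_NEWPROT), so the next TLB flush knows to mirror the mapping
 * into the host address space.
 */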

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
}
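
/*
 * Masking out _PAGE_NEWPAGE above means two ptes compare equal even if
 * only one of them has had its mapping pushed out to the host yet --
 * a reasonable reading, since that bit tracks pending host work rather
 * than the mapping itself.
 */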

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
	({ pte_t pte;					\
							\
	pte_set_val(pte, page_to_phys(page), (pgprot));	\
	if (pte_present(pte))				\
		pte_mknewprot(pte_mknewpage(pte));	\
	pte;})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

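/*
 * Worked example, assuming the 2-level configuration with 4kB pages
 * (PGDIR_SHIFT == 22, PTRS_PER_PGD == 1024; both values live in
 * pgtable-2level.h, not here):
 *
 *	pgd_index(0x08048000) == (0x08048000 >> 22) & 1023 == 32
 *
 * so that address is controlled by pgd entry 32.
 */
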
/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) do { } while (0)

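/*
 * Putting the pieces together -- a sketch of a full walk from a
 * virtual address to its pte (hypothetical snippet; the pud/pmd
 * levels may be folded depending on CONFIG_3_LEVEL_PGTABLES):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pud_offset(pgd, addr), addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * which is essentially what virt_to_pte() below wraps into one call.
 */
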
struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma, address, ptep) do { } while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)		(((x).val >> 5) & 0x1f)
#define __swp_offset(x)		((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
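
/*
 * The resulting swap-entry layout, derived from the shifts above:
 *
 *	bits  0-4	left for pte status bits (_PAGE_PRESENT stays clear)
 *	bits  5-9	swap type (5 bits)
 *	bits 11-	swap offset (bit 10 unused)
 *
 * Worked example: type 2, offset 0x1234 encodes as
 *
 *	(2 << 5) | (0x1234 << 11) == 0x40 | 0x91a000 == 0x91a040
 */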

#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)

#endif