Commit | Line | Data |
---|---|---|
45051539 | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
dcfdae04 CM |
2 | /* |
3 | * arch/arm/include/asm/pgtable-3level.h | |
4 | * | |
5 | * Copyright (C) 2011 ARM Ltd. | |
6 | * Author: Catalin Marinas <catalin.marinas@arm.com> | |
dcfdae04 CM |
7 | */ |
8 | #ifndef _ASM_PGTABLE_3LEVEL_H | |
9 | #define _ASM_PGTABLE_3LEVEL_H | |
10 | ||
11 | /* | |
12 | * With LPAE, there are 3 levels of page tables. Each level has 512 entries of | |
13 | * 8 bytes each, occupying a 4K page. The first level table covers a range of | |
14 | * 512GB, each entry representing 1GB. Since we are limited to 4GB input | |
15 | * address range, only 4 entries in the PGD are used. | |
16 | * | |
17 | * There are enough spare bits in a page table entry for the kernel specific | |
18 | * state. | |
19 | */ | |
20 | #define PTRS_PER_PTE 512 | |
21 | #define PTRS_PER_PMD 512 | |
22 | #define PTRS_PER_PGD 4 | |
23 | ||
e38a5175 | 24 | #define PTE_HWTABLE_PTRS (0) |
dcfdae04 CM |
25 | #define PTE_HWTABLE_OFF (0) |
26 | #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64)) | |
27 | ||
28 | /* | |
29 | * PGDIR_SHIFT determines the size a top-level page table entry can map. | |
30 | */ | |
31 | #define PGDIR_SHIFT 30 | |
32 | ||
33 | /* | |
34 | * PMD_SHIFT determines the size a middle-level page table entry can map. | |
35 | */ | |
36 | #define PMD_SHIFT 21 | |
37 | ||
38 | #define PMD_SIZE (1UL << PMD_SHIFT) | |
926edcc7 | 39 | #define PMD_MASK (~((1 << PMD_SHIFT) - 1)) |
dcfdae04 | 40 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
926edcc7 | 41 | #define PGDIR_MASK (~((1 << PGDIR_SHIFT) - 1)) |
dcfdae04 CM |
42 | |
43 | /* | |
44 | * section address mask and size definitions. | |
45 | */ | |
46 | #define SECTION_SHIFT 21 | |
47 | #define SECTION_SIZE (1UL << SECTION_SHIFT) | |
926edcc7 | 48 | #define SECTION_MASK (~((1 << SECTION_SHIFT) - 1)) |
dcfdae04 CM |
49 | |
50 | #define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE) | |
51 | ||
/*
 * Hugetlb definitions: huge pages are PMD-sized (2MB) blocks.
 */
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
/*
 * "Linux" PTE definitions for LPAE.
 *
 * These bits overlap with the hardware bits but the naming is preserved for
 * consistency with the classic page table format.
 */
#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Present */
#define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
#define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 10)	/* AF */
#define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */
#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 58)	/* READ ONLY */

/*
 * Software PMD section bits, mirroring the PTE software bits above.
 * (L_PMD_SECT_RDONLY previously used _AT(pteval_t, ...); changed to pmdval_t
 * for consistency with its siblings — both are u64 under LPAE.)
 */
#define L_PMD_SECT_VALID	(_AT(pmdval_t, 1) << 0)
#define L_PMD_SECT_DIRTY	(_AT(pmdval_t, 1) << 55)
#define L_PMD_SECT_NONE		(_AT(pmdval_t, 1) << 57)
#define L_PMD_SECT_RDONLY	(_AT(pmdval_t, 1) << 58)
/*
 * To be used in assembly code with the upper page attributes.
 * (Same bits as L_PTE_XN / L_PTE_DIRTY, expressed relative to the high word.)
 */
#define L_PTE_XN_HIGH		(1 << (54 - 32))
#define L_PTE_DIRTY_HIGH	(1 << (55 - 32))

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0) << 2)	/* strongly ordered */
#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 2) << 2)	/* normal inner write-through */
#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 7) << 2)	/* normal inner write-alloc */
#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 4) << 2)	/* device */
#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 4) << 2)	/* device */
#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
#define L_PTE_MT_MASK		(_AT(pteval_t, 7) << 2)

/*
 * Software PGD flags.
 */
#define L_PGD_SWAPPER		(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */
/*
 * 2nd stage PTE definitions for LPAE.
 */
#define L_PTE_S2_MT_UNCACHED		(_AT(pteval_t, 0x0) << 2) /* strongly ordered */
#define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
#define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
#define L_PTE_S2_MT_DEV_SHARED		(_AT(pteval_t, 0x1) << 2) /* device */
#define L_PTE_S2_MT_MASK		(_AT(pteval_t, 0xf) << 2)

#define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)   /* HAP[1] */
#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */

#define L_PMD_S2_RDONLY			(_AT(pmdval_t, 1) << 6)   /* HAP[1] */
#define L_PMD_S2_RDWR			(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */

/*
 * Hyp-mode PL2 PTE definitions for LPAE.
 */
#define L_PTE_HYP		L_PTE_USER
da028779 CM |
127 | #ifndef __ASSEMBLY__ |
128 | ||
129 | #define pud_none(pud) (!pud_val(pud)) | |
130 | #define pud_bad(pud) (!(pud_val(pud) & 2)) | |
131 | #define pud_present(pud) (pud_val(pud)) | |
cc577c26 CD |
132 | #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ |
133 | PMD_TYPE_TABLE) | |
134 | #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ | |
135 | PMD_TYPE_SECT) | |
1fd15b87 | 136 | #define pmd_large(pmd) pmd_sect(pmd) |
da028779 CM |
137 | |
138 | #define pud_clear(pudp) \ | |
139 | do { \ | |
140 | *pudp = __pud(0); \ | |
141 | clean_pmd_entry(pudp); \ | |
142 | } while (0) | |
143 | ||
144 | #define set_pud(pudp, pud) \ | |
145 | do { \ | |
146 | *pudp = pud; \ | |
147 | flush_pmd_entry(pudp); \ | |
148 | } while (0) | |
149 | ||
150 | static inline pmd_t *pud_page_vaddr(pud_t pud) | |
151 | { | |
152 | return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK); | |
153 | } | |
154 | ||
155 | /* Find an entry in the second-level page table.. */ | |
156 | #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) | |
157 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) | |
158 | { | |
159 | return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr); | |
160 | } | |
161 | ||
162 | #define pmd_bad(pmd) (!(pmd_val(pmd) & 2)) | |
163 | ||
164 | #define copy_pmd(pmdpd,pmdps) \ | |
165 | do { \ | |
166 | *pmdpd = *pmdps; \ | |
167 | flush_pmd_entry(pmdpd); \ | |
168 | } while (0) | |
169 | ||
170 | #define pmd_clear(pmdp) \ | |
171 | do { \ | |
172 | *pmdp = __pmd(0); \ | |
173 | clean_pmd_entry(pmdp); \ | |
174 | } while (0) | |
175 | ||
/*
 * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
 * that are written to a page table but not for ptes created with mk_pte.
 *
 * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
 * hugetlb_cow, where it is compared with an entry in a page table.
 * This comparison test fails erroneously leading ultimately to a memory leak.
 *
 * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
 * present before running the comparison.
 */
#define __HAVE_ARCH_PTE_SAME
#define pte_same(pte_a,pte_b)	((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG	\
					: pte_val(pte_a))				\
				== (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG	\
					: pte_val(pte_b)))
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))

#define pte_huge(pte)		(pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Flags in the low 32 bits are returned as a plain mask; flags above bit 31
 * are reduced to 0/1 with !! — presumably so the result survives truncation
 * to a 32-bit type by callers (NOTE(review): confirm against call sites).
 */
#define pmd_isset(pmd, val)	((u32)(val) == (val) ? pmd_val(pmd) & (val)	\
						: !!(pmd_val(pmd) & (val)))
#define pmd_isclear(pmd, val)	(!(pmd_val(pmd) & (val)))
62453188 | 202 | #define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID)) |
f2950706 | 203 | #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF)) |
bd951303 SC |
204 | #define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL)) |
205 | static inline pte_t pte_mkspecial(pte_t pte) | |
206 | { | |
207 | pte_val(pte) |= L_PTE_SPECIAL; | |
208 | return pte; | |
209 | } | |
8d962507 | 210 | |
#define pmd_write(pmd)		(pmd_isclear((pmd), L_PMD_SECT_RDONLY))
#define pmd_dirty(pmd)		(pmd_isset((pmd), L_PMD_SECT_DIRTY))
#define pud_page(pud)		pmd_page(__pmd(pud_val(pud)))
#define pud_write(pud)		pmd_write(__pmd(pud_val(pud)))

#define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* A THP entry is a non-empty pmd that is not a table pointer. */
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !pmd_table(pmd))
#endif
223 | #define PMD_BIT_FUNC(fn,op) \ | |
224 | static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; } | |
225 | ||
ded94779 | 226 | PMD_BIT_FUNC(wrprotect, |= L_PMD_SECT_RDONLY); |
8d962507 | 227 | PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF); |
ded94779 SC |
228 | PMD_BIT_FUNC(mkwrite, &= ~L_PMD_SECT_RDONLY); |
229 | PMD_BIT_FUNC(mkdirty, |= L_PMD_SECT_DIRTY); | |
44842045 | 230 | PMD_BIT_FUNC(mkclean, &= ~L_PMD_SECT_DIRTY); |
8d962507 CM |
231 | PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); |
232 | ||
233 | #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) | |
234 | ||
235 | #define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT) | |
236 | #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) | |
237 | #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) | |
238 | ||
ef298cc5 KS |
239 | /* No hardware dirty/accessed bits -- generic_pmdp_establish() fits */ |
240 | #define pmdp_establish generic_pmdp_establish | |
241 | ||
56530f5d | 242 | /* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */ |
4d942466 MG |
243 | static inline pmd_t pmd_mknotpresent(pmd_t pmd) |
244 | { | |
56530f5d | 245 | return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID); |
4d942466 | 246 | } |
8d962507 CM |
247 | |
248 | static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) | |
249 | { | |
ded94779 SC |
250 | const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | L_PMD_SECT_RDONLY | |
251 | L_PMD_SECT_VALID | L_PMD_SECT_NONE; | |
8d962507 CM |
252 | pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask); |
253 | return pmd; | |
254 | } | |
255 | ||
256 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, | |
257 | pmd_t *pmdp, pmd_t pmd) | |
258 | { | |
259 | BUG_ON(addr >= TASK_SIZE); | |
260 | ||
261 | /* create a faulting entry if PROT_NONE protected */ | |
ded94779 SC |
262 | if (pmd_val(pmd) & L_PMD_SECT_NONE) |
263 | pmd_val(pmd) &= ~L_PMD_SECT_VALID; | |
264 | ||
265 | if (pmd_write(pmd) && pmd_dirty(pmd)) | |
266 | pmd_val(pmd) &= ~PMD_SECT_AP2; | |
267 | else | |
268 | pmd_val(pmd) |= PMD_SECT_AP2; | |
8d962507 CM |
269 | |
270 | *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG); | |
271 | flush_pmd_entry(pmdp); | |
272 | } | |
273 | ||
da028779 CM |
274 | #endif /* __ASSEMBLY__ */ |
275 | ||
dcfdae04 | 276 | #endif /* _ASM_PGTABLE_3LEVEL_H */ |